Dataset schema (column name, dtype, observed range):

| column               | dtype    | range              |
|----------------------|----------|--------------------|
| entry_point          | string   | 1 - 65 chars       |
| original_triton_code | string   | 4.5k - 619k chars  |
| python_code          | string   | 208 - 60.9k chars  |
| triton_code          | string   | 1.15k - 275k chars |
| repo_name            | string   | 7 - 115 chars      |
| module_name          | string   | 1 - 65 chars       |
| synthetic            | bool     | 1 class            |
| uuid                 | int64    | 0 - 18.5k          |
| licenses             | sequence | 1 - 6 items        |
| stars                | int64    | 0 - 19.8k          |
| sha                  | string   | 40 chars (fixed)   |
| repo_link            | string   | 72 - 180 chars     |
| pytorch_code         | string   | 200 - 4.05k chars  |
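Each row pairs a reference PyTorch module with inductor-generated Triton code. As a minimal sketch of how a row could be inspected with the Hugging Face `datasets` library (the path `"<org>/<dataset>"` is a placeholder, since this excerpt does not name the dataset):

```python
# Hypothetical loading sketch; "<org>/<dataset>" is a placeholder,
# not the real dataset path, which this excerpt does not give.
from datasets import load_dataset

ds = load_dataset("<org>/<dataset>", split="train")
row = ds[0]
print(row["entry_point"], row["repo_name"], row["licenses"], row["stars"])
print(row["python_code"][:200])   # original PyTorch module
print(row["triton_code"][:200])   # cleaned inductor/Triton rewrite
```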
entry_point: CosineSimilarity_custom
original_triton_code:

```python
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/3v/c3vyztzgw3iiqfspecgmyuijbvk6r675ogzi4x5mnjsc4kdyk3gp.py
# Topologically Sorted Source Nodes: [cosine_similarity], Original ATen: [aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul]
# Source node to ATen node mapping:
#   cosine_similarity => clamp_min, clamp_min_1, div, div_1, mul, pow_1, pow_2, pow_3, pow_4, sum_1, sum_2
# Graph fragment:
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
#   %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
#   %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-08), kwargs = {})
#   %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg1_1, %clamp_min), kwargs = {})
#   %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
#   %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1], True), kwargs = {})
#   %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
#   %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_4, 1e-08), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %clamp_min_1), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %div), kwargs = {})
triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0 = async_compile.triton('triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = (xindex // 64)
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr1 + (x3), xmask)
    tmp17 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-08
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tmp18 = tmp17 * tmp17
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = libdevice.sqrt(tmp27)
    tmp29 = triton_helpers.maximum(tmp28, tmp13)
    tmp30 = tmp16 / tmp29
    tmp31 = tmp15 * tmp30
    tl.store(out_ptr0 + (x3), tmp31, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_4/inductor_cache/25/c25qpwkdkb7p2sqprzwstqk3uxuf6n63kmah5qbi67gtdiiylms4.py
# Topologically Sorted Source Nodes: [cosine_similarity, sub], Original ATen: [aten.sum, aten.rsub]
# Source node to ATen node mapping:
#   cosine_similarity => sum_3
#   sub => sub
# Graph fragment:
#   %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sum_3), kwargs = {})
triton_poi_fused_rsub_sum_1 = async_compile.triton('triton_poi_fused_rsub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_rsub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_rsub_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = (xindex // 16)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
    tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
    tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
    tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 1.0
    tmp8 = tmp7 - tmp6
    tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [cosine_similarity], Original ATen: [aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul]
        stream0 = get_raw_stream(0)
        triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0.run(arg1_1, arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [cosine_similarity, sub], Original ATen: [aten.sum, aten.rsub]
        triton_poi_fused_rsub_sum_1.run(buf0, buf1, 64, grid=grid(64), stream=stream0)
        del buf0
    return (buf1, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
```
python_code:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class CosineSimilarity_custom(nn.Module):

    def __init__(self, dim: 'int'=1, eps: 'float'=1e-08):
        super(CosineSimilarity_custom, self).__init__()
        self.dim = dim
        self.eps = eps

    def forward(self, x1, x2):
        return 1 - F.cosine_similarity(x1, x2, self.dim, self.eps)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
```
triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr1 + x3, xmask)
    tmp17 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-08
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tmp18 = tmp17 * tmp17
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = libdevice.sqrt(tmp27)
    tmp29 = triton_helpers.maximum(tmp28, tmp13)
    tmp30 = tmp16 / tmp29
    tmp31 = tmp15 * tmp30
    tl.store(out_ptr0 + x3, tmp31, xmask)


@triton.jit
def triton_poi_fused_rsub_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 1.0
    tmp8 = tmp7 - tmp6
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_rsub_sum_1[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del buf0
    return buf1,


class CosineSimilarity_customNew(nn.Module):

    def __init__(self, dim: 'int'=1, eps: 'float'=1e-08):
        super(CosineSimilarity_customNew, self).__init__()
        self.dim = dim
        self.eps = eps

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
```
repo_name: Lhx94As/PHO-LID
module_name: CosineSimilarity_custom
synthetic: false
uuid: 5518
licenses: ["MIT"]
stars: 1
sha: 44843b25b977dd6e0b77b520dbe3f2ff1ea633cd
repo_link: https://github.com/Lhx94As/PHO-LID/tree/44843b25b977dd6e0b77b520dbe3f2ff1ea633cd
pytorch_code:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class Model(nn.Module):

    def __init__(self, dim: 'int'=1, eps: 'float'=1e-08):
        super().__init__()
        self.dim = dim
        self.eps = eps

    def forward(self, x1, x2):
        return 1 - F.cosine_similarity(x1, x2, self.dim, self.eps)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
```
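A quick numerical check, not part of the dataset row: assuming the `python_code` and `triton_code` fields above have been executed in one session and a CUDA device is available, the eager module and the Triton-backed rewrite should agree.

```python
# Sanity-check sketch (assumes CosineSimilarity_custom and
# CosineSimilarity_customNew from the fields above are in scope;
# contiguous (4, 4, 4, 4) inputs match the asserted strides in call()).
import torch

x1 = torch.rand(4, 4, 4, 4, device="cuda")
x2 = torch.rand(4, 4, 4, 4, device="cuda")
ref = CosineSimilarity_custom()(x1, x2)
out = CosineSimilarity_customNew()(x1, x2)
torch.testing.assert_close(out, ref, rtol=1e-5, atol=1e-6)
```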
entry_point: Symmetric
original_triton_code:

```python
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/4f/c4fekrlplou6jqs7d22hwo63cvbbodj3arj7a5rnr62q3dnsdcpp.py
# Topologically Sorted Source Nodes: [triu, add], Original ATen: [aten.triu, aten.add]
# Source node to ATen node mapping:
#   add => add
#   triu => full_default, ge, sub, where
# Graph fragment:
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {})
#   %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%sub, 0), kwargs = {})
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%ge, %arg0_1, %full_default), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, %permute), kwargs = {})
triton_poi_fused_add_triu_0 = async_compile.triton('triton_poi_fused_add_triu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64, 4], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_triu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_triu_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 64
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y3 = yindex
    y1 = (yindex // 4)
    tmp3 = tl.load(in_ptr0 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
    tmp0 = x2 + ((-1)*y0)
    tmp1 = tl.full([1, 1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp4 = 0.0
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = y0 + ((-1)*x2)
    tmp7 = tl.full([1, 1], 1, tl.int64)
    tmp8 = tmp6 >= tmp7
    tmp10 = tl.where(tmp8, tmp9, tmp4)
    tmp11 = tmp5 + tmp10
    tl.store(out_ptr0 + (x2 + (4*y3)), tmp11, xmask & ymask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [triu, add], Original ATen: [aten.triu, aten.add]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_triu_0.run(arg0_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0)
        del arg0_1
    return (buf0, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
```
python_code:

```python
import torch
import torch.nn as nn
import torch.quantization
import torch.onnx
import torch.nn.parallel
import torch.utils.data
import torch.fx
import torch.nn
import torch.optim
import torch.profiler


class Symmetric(nn.Module):

    def forward(self, X):
        return X.triu() + X.triu(1).transpose(-1, -2)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
```
triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.quantization
import torch.onnx
import torch.nn.parallel
import torch.utils.data
import torch.fx
import torch.nn
import torch.optim
import torch.profiler

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_triu_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 64
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y3 = yindex
    y1 = yindex // 4
    tmp3 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last')
    tmp0 = x2 + -1 * y0
    tmp1 = tl.full([1, 1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp4 = 0.0
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = y0 + -1 * x2
    tmp7 = tl.full([1, 1], 1, tl.int64)
    tmp8 = tmp6 >= tmp7
    tmp10 = tl.where(tmp8, tmp9, tmp4)
    tmp11 = tmp5 + tmp10
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp11, xmask & ymask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_triu_0[grid(64, 4)](arg0_1, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SymmetricNew(nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
```
repo_name: Lezcano/tutorials
module_name: Symmetric
synthetic: false
uuid: 5519
licenses: ["BSD-3-Clause"]
stars: 1
sha: 24946b2e6d3d825afed6b35c1c4d618a70a88be8
repo_link: https://github.com/Lezcano/tutorials/tree/24946b2e6d3d825afed6b35c1c4d618a70a88be8
pytorch_code:

```python
import torch
import torch.nn as nn
import torch.quantization
import torch.onnx
import torch.nn.parallel
import torch.utils.data
import torch.fx
import torch.nn
import torch.optim
import torch.profiler


class Model(nn.Module):

    def forward(self, X):
        return X.triu() + X.triu(1).transpose(-1, -2)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
```
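A small property check, not part of the dataset row: the expression `X.triu() + X.triu(1).transpose(-1, -2)` that both `Symmetric` and the fused kernel compute is symmetric in the last two dimensions by construction, since the strict lower triangle is a mirrored copy of the strict upper triangle.

```python
# Property check: the output equals its own transpose exactly,
# because each off-diagonal pair holds the same upper-triangle value.
import torch

X = torch.rand(4, 4, 4, 4)
Y = X.triu() + X.triu(1).transpose(-1, -2)
assert torch.equal(Y, Y.transpose(-1, -2))
```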
entry_point: Readout
original_triton_code:

```python
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/ig/cig776c5vnkodl5ylq5rpdauwikblftbceldk4vqgy3ytpdyzgbf.py
# Topologically Sorted Source Nodes: [x, y], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
#   x => mul
#   y => sum_1
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %unsqueeze), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
triton_poi_fused_mul_sum_0 = async_compile.triton('triton_poi_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex % 64
    x1 = (xindex // 4) % 16
    x2 = (xindex // 64)
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x1 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (16 + x1 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (32 + x1 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (48 + x1 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 * tmp12
    tmp14 = tmp10 + tmp13
    tl.store(out_ptr0 + (x4), tmp14, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x, y], Original ATen: [aten.mul, aten.sum]
        stream0 = get_raw_stream(0)
        triton_poi_fused_mul_sum_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf0, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
```
python_code:

```python
from torch.nn import Module
import torch
import torch.utils.data


def aggregate(x, dim, aggr='add', mask=None, keepdim=False):
    """
    Args:
        x: (..., A, ..., F), Features to be aggregated.
        mask: (..., A, ...)
    Returns:
        (..., , ..., F), if keepdim == False
        (..., 1, ..., F), if keepdim == True
    """
    assert aggr in ('add', 'mean')
    if mask is not None:
        x = x * mask.unsqueeze(-1)
    y = torch.sum(x, dim=dim, keepdim=keepdim)
    if aggr == 'mean':
        if mask is not None:
            n = torch.sum(mask, dim=dim, keepdim=keepdim)
            n = torch.max(n, other=torch.ones_like(n))
        else:
            n = x.size(dim)
        y = y / n
    return y


def readout(x, mask, aggr='add'):
    """
    Args:
        x: (B, N_max, F)
        mask: (B, N_max)
    Returns:
        (B, F)
    """
    return aggregate(x=x, dim=1, aggr=aggr, mask=mask, keepdim=False)


class Readout(Module):

    def __init__(self, aggr='add'):
        super().__init__()
        assert aggr in ('add', 'mean')
        self.aggr = aggr

    def forward(self, x, mask):
        return readout(x, mask=mask, aggr=self.aggr)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
```
triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex % 64
    x1 = xindex // 4 % 16
    x2 = xindex // 64
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x1 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (16 + x1 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (32 + x1 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (48 + x1 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 * tmp12
    tmp14 = tmp10 + tmp13
    tl.store(out_ptr0 + x4, tmp14, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_sum_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


def aggregate(x, dim, aggr='add', mask=None, keepdim=False):
    """
    Args:
        x: (..., A, ..., F), Features to be aggregated.
        mask: (..., A, ...)
    Returns:
        (..., , ..., F), if keepdim == False
        (..., 1, ..., F), if keepdim == True
    """
    assert aggr in ('add', 'mean')
    if mask is not None:
        x = x * mask.unsqueeze(-1)
    y = torch.sum(x, dim=dim, keepdim=keepdim)
    if aggr == 'mean':
        if mask is not None:
            n = torch.sum(mask, dim=dim, keepdim=keepdim)
            n = torch.max(n, other=torch.ones_like(n))
        else:
            n = x.size(dim)
        y = y / n
    return y


def readout(x, mask, aggr='add'):
    """
    Args:
        x: (B, N_max, F)
        mask: (B, N_max)
    Returns:
        (B, F)
    """
    return aggregate(x=x, dim=1, aggr=aggr, mask=mask, keepdim=False)


class ReadoutNew(Module):

    def __init__(self, aggr='add'):
        super().__init__()
        assert aggr in ('add', 'mean')
        self.aggr = aggr

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
```
repo_name: LichenYang-Jeffrey/GAT-for-COVID-19
module_name: Readout
synthetic: false
uuid: 5520
licenses: ["MIT"]
stars: 1
sha: 91cc6048f14856f3ef9dfebf2db45e2a36975159
repo_link: https://github.com/LichenYang-Jeffrey/GAT-for-COVID-19/tree/91cc6048f14856f3ef9dfebf2db45e2a36975159
pytorch_code:

```python
from torch.nn import Module
import torch
import torch.utils.data


def aggregate(x, dim, aggr='add', mask=None, keepdim=False):
    """
    Args:
        x: (..., A, ..., F), Features to be aggregated.
        mask: (..., A, ...)
    Returns:
        (..., , ..., F), if keepdim == False
        (..., 1, ..., F), if keepdim == True
    """
    assert aggr in ('add', 'mean')
    if mask is not None:
        x = x * mask.unsqueeze(-1)
    y = torch.sum(x, dim=dim, keepdim=keepdim)
    if aggr == 'mean':
        if mask is not None:
            n = torch.sum(mask, dim=dim, keepdim=keepdim)
            n = torch.max(n, other=torch.ones_like(n))
        else:
            n = x.size(dim)
        y = y / n
    return y


def readout(x, mask, aggr='add'):
    """
    Args:
        x: (B, N_max, F)
        mask: (B, N_max)
    Returns:
        (B, F)
    """
    return aggregate(x=x, dim=1, aggr=aggr, mask=mask, keepdim=False)


class Model(Module):

    def __init__(self, aggr='add'):
        super().__init__()
        assert aggr in ('add', 'mean')
        self.aggr = aggr

    def forward(self, x, mask):
        return readout(x, mask=mask, aggr=self.aggr)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
```
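An illustration, not part of the dataset row: with the documented shapes, `readout(x, mask, aggr='add')` is the masked sum over the node dimension, assuming the `aggregate`/`readout` definitions above are in scope.

```python
# Masked-sum illustration with the docstring shapes (B, N_max, F) / (B, N_max).
import torch

x = torch.rand(2, 3, 5)                                   # (B, N_max, F)
mask = torch.tensor([[1.0, 1.0, 0.0], [1.0, 0.0, 0.0]])   # (B, N_max)
expected = (x * mask.unsqueeze(-1)).sum(dim=1)            # zero padded nodes, then sum
assert torch.allclose(readout(x, mask, aggr='add'), expected)
```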
entry_point: AdaINConv2dLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/wi/cwixe2mvtbslnlduj6iu3ijkrf5snrdwvhhqsvnunr7zp3pbtb6h.py # Topologically Sorted Source Nodes: [mv, norm, add, truediv], Original ATen: [aten.mv, aten.linalg_vector_norm, aten.add, aten.div] # Source node to ATen node mapping: # add => add # mv => mul, sum_1 # norm => pow_1, pow_2, sum_2 # truediv => div # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute, %primals_2), kwargs = {}) # %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, 1e-12), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %add), kwargs = {}) triton_per_fused_add_div_linalg_vector_norm_mv_0 = async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_mv_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_mv_0', 'mutated_arg_names': 
['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_mv_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.load(in_ptr0 + (64 + r0), None) tmp5 = tl.load(in_ptr1 + (1)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp9 = tl.load(in_ptr0 + (128 + r0), None) tmp10 = tl.load(in_ptr1 + (2)) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp14 = tl.load(in_ptr0 + (192 + r0), None) tmp15 = tl.load(in_ptr1 + (3)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.sum(tmp20, 1)[:, None] tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-12 tmp25 = tmp23 + tmp24 tmp26 = tmp18 / tmp25 tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp18, None) tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp25, None) tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp26, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/zj/czj3q7hwubmlpr6euena2hgafqa2zyahzg2ovq7jfcmwd7h7pead.py # Topologically Sorted Source Nodes: [truediv, mv_1], Original ATen: [aten.div, aten.mv] # Source node to ATen node mapping: # mv_1 => mul_1, sum_3 # truediv => div # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %add), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %div), kwargs = {}) # %sum_3 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {}) triton_per_fused_div_mv_1 = async_compile.triton('triton_per_fused_div_mv_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mv_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_mv_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4 = tmp1 / tmp3 tmp5 = tmp0 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/vy/cvykqyjomwxei4fyropo4jqebs4lj5nohnzcebbnfyn5byyue7bz.py # Topologically Sorted Source Nodes: [norm_1, add_1, truediv_1], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div] # Source node to ATen node mapping: # add_1 => add_1 # norm_1 => pow_3, pow_4, sum_4 # truediv_1 => div_1 # Graph fragment: # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 2), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, None), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_4, 0.5), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_4, 1e-12), kwargs = {}) # %div_1 : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, %add_1), kwargs = {}) triton_per_fused_add_div_linalg_vector_norm_2 = async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_2(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp6 = 1e-12 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp8, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/px/cpxcgb5q54ixp24egg5wrifmrdsxkmow6sly6c226q3lm2gvyou5.py # Topologically Sorted Source Nodes: [sigma], Original ATen: [aten.dot] # Source node to ATen node mapping: # sigma => mul_3, sum_6 # Graph fragment: # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %sum_3), kwargs = {}) # %sum_6 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%mul_3,), kwargs = {}) triton_per_fused_dot_3 = async_compile.triton('triton_per_fused_dot_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_dot_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_dot_3(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = 
tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/op/cop3qrnpelmxzprgtvtmh4w3aypjouw7zyvmylhriwz2v7g6bddm.py # Topologically Sorted Source Nodes: [truediv_2], Original ATen: [aten.div] # Source node to ATen node mapping: # truediv_2 => div_2 # Graph fragment: # %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_4, %expand), kwargs = {}) triton_poi_fused_div_4 = async_compile.triton('triton_poi_fused_div_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 / tmp2 tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ia/ciaqbxaapwdirvjneo7orzmp3nsd523g2ea3cyzdaow6bnehgvuj.py # Topologically Sorted Source Nodes: [x_1, x_2, add_2, var_2, x_3, mul_2, content, x_4], Original ATen: [aten.convolution, aten.sub, aten.add, aten.rsqrt, aten.mul, aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # add_2 => add_2 # content => add_3 # mul_2 => mul_6 # var_2 => rsqrt # x_1 => convolution # x_2 => sub # x_3 => mul_5 # x_4 => gt, mul_7, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %div_2, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution, %view_4), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_6, 1e-08), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) 
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %select), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %select_1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_3, 0), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_3, %mul_7), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where, 0), kwargs = {}) triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_mul_rsqrt_sub_5 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_mul_rsqrt_sub_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_mul_rsqrt_sub_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_mul_rsqrt_sub_5(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (x0 + (8*x1)), xmask) tmp14 = tl.load(in_ptr1 + (4 + x0 + (8*x1)), xmask) tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 / tmp3 tmp5 = tmp2 - tmp4 tmp6 = tmp5 * tmp5 tmp7 = tmp6 / tmp3 tmp8 = 1e-08 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp5 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tmp16 = 0.0 tmp17 = tmp15 > tmp16 tmp18 = 0.2 tmp19 = tmp15 * tmp18 tmp20 = tl.where(tmp17, tmp15, tmp19) tmp21 = tmp20 > tmp16 tl.store(in_out_ptr0 + (x2), tmp2, xmask) tl.store(out_ptr0 + (x2), tmp20, xmask) tl.store(out_ptr1 + (x2), tmp21, xmask) ''', device_str='cuda') async_compile.wait(globals()) del 
async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (256, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (64, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (32, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, ), (1, ), torch.float32) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse buf17 = empty_strided_cuda((64, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mv, norm, add, truediv], Original ATen: [aten.mv, aten.linalg_vector_norm, aten.add, aten.div] stream0 = get_raw_stream(0) triton_per_fused_add_div_linalg_vector_norm_mv_0.run(buf2, primals_4, primals_2, buf0, buf17, 1, 64, grid=grid(1), stream=stream0) buf3 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [truediv, mv_1], Original ATen: [aten.div, aten.mv] triton_per_fused_div_mv_1.run(primals_4, buf0, buf2, buf3, 4, 64, grid=grid(4), stream=stream0) buf5 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [norm_1, add_1, truediv_1], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div] triton_per_fused_add_div_linalg_vector_norm_2.run(buf3, buf5, 1, 4, grid=grid(1), stream=stream0) buf6 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [sigma], Original ATen: [aten.dot] triton_per_fused_dot_3.run(buf5, buf3, buf6, 1, 4, grid=grid(1), stream=stream0) del buf3 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [truediv_2], Original ATen: [aten.div] triton_poi_fused_div_4.run(primals_4, buf6, buf7, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(primals_1, buf7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (256, 4, 1, 1), (4, 1, 1, 1)) buf9 = buf8; del buf8 # reuse buf10 = empty_strided_cuda((256, 4, 1, 1), (4, 1, 1, 1), torch.float32) buf11 = empty_strided_cuda((256, 4, 1, 1), (4, 1, 1, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1, x_2, add_2, var_2, x_3, mul_2, content, x_4], Original ATen: [aten.convolution, aten.sub, aten.add, aten.rsqrt, aten.mul, aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_mul_rsqrt_sub_5.run(buf9, primals_5, primals_6, buf10, buf11, 1024, grid=grid(1024), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [], Original ATen: [] buf12 = torch.ops.aten.set_.source_Tensor(primals_2, buf5) assert_size_stride(buf12, (4, ), (1, )) del buf0 # Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div] buf18 = torch.ops.aten.set_.source_Tensor(primals_3, buf17) assert_size_stride(buf18, (64, ), (1, )) del primals_3 return (buf10, buf7, primals_1, primals_2, primals_4, buf2, buf5, buf6, buf7, buf9, reinterpret_tensor(primals_6, (256, 4, 1, 1), (8, 1, 1, 1), 0), buf11, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((256, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) 
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((32, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
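The call() above writes the refreshed power-iteration vectors back into the module's parameters with torch.ops.aten.set_.source_Tensor. A minimal sketch of what that rebinding does, assuming only stock PyTorch (illustrative; the tensors a and b are hypothetical):

import torch

a = torch.zeros(4)
b = torch.arange(4.0)
torch.ops.aten.set_.source_Tensor(a, b)   # a now aliases b's storage, size, and strides
assert a.data_ptr() == b.data_ptr()
assert torch.equal(a, torch.arange(4.0))  # reads through a see b's values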
import torch import torch.nn as nn from torch.nn import Parameter def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, eps=1e-08): super(AdaptiveInstanceNorm2d, self).__init__() self.eps = eps def IN_noWeight(self, x): N, C = x.size(0), x.size(1) mean = x.contiguous().view(N, C, -1).mean(2).contiguous().view(N, C, 1, 1) x = x - mean var = torch.mul(x, x) var = var.contiguous().view(N, C, -1).mean(2).contiguous().view(N, C, 1, 1) var = torch.rsqrt(var + self.eps) x = x * var return x def Apply_style(self, content, style): style = style.contiguous().view([-1, 2, content.size(1), 1, 1]) content = content * style[:, 0] + style[:, 1] return content def forward(self, content, style): normalized_content = self.IN_noWeight(content) stylized_content = self.Apply_style(normalized_content, style) return stylized_content class AdaINConv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', sn=True): super(AdaINConv2dLayer, self).__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) self.norm = AdaptiveInstanceNorm2d() if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigmoid() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation)) else: 
self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation) def forward(self, x, style): x = self.pad(x) x = self.conv2d(x) x = self.norm(x, style) if self.activation: x = self.activation(x) return x def get_inputs(): return [torch.rand([256, 4, 4, 4]), torch.rand([32, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
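Before reading the compiled version, it helps to see what SpectralNorm's power iteration converges to: sigma approaches the largest singular value of the flattened weight. A minimal sketch under that reading (illustrative; the 50-iteration loop and the svdvals comparison are my additions, while the module itself runs power_iterations=1 per forward):

import torch

def l2normalize(v, eps=1e-12):
    return v / (v.norm() + eps)

torch.manual_seed(0)
w = torch.randn(4, 64)                      # height x width, like w.view(height, -1)
u = l2normalize(torch.randn(4))
for _ in range(50):                         # many iterations for a tight estimate
    v = l2normalize(torch.mv(torch.t(w), u))
    u = l2normalize(torch.mv(w, v))
sigma = u.dot(torch.mv(w, v))               # same form as u.dot(w.view(h, -1).mv(v))
assert torch.allclose(sigma, torch.linalg.svdvals(w)[0], rtol=1e-3)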
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_div_linalg_vector_norm_mv_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.load(in_ptr0 + (64 + r0), None) tmp5 = tl.load(in_ptr1 + 1) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp9 = tl.load(in_ptr0 + (128 + r0), None) tmp10 = tl.load(in_ptr1 + 2) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp14 = tl.load(in_ptr0 + (192 + r0), None) tmp15 = tl.load(in_ptr1 + 3) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.sum(tmp20, 1)[:, None] tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-12 tmp25 = tmp23 + tmp24 tmp26 = tmp18 / tmp25 tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp18, None) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp25, None) tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp26, None) @triton.jit def triton_per_fused_div_mv_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4 = tmp1 / tmp3 tmp5 = tmp0 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_2(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp6 = 1e-12 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp8, None) @triton.jit def triton_per_fused_dot_3(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + 
tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) @triton.jit def triton_poi_fused_div_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 / tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_mul_rsqrt_sub_5( in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (x0 + 8 * x1), xmask) tmp14 = tl.load(in_ptr1 + (4 + x0 + 8 * x1), xmask) tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 / tmp3 tmp5 = tmp2 - tmp4 tmp6 = tmp5 * tmp5 tmp7 = tmp6 / tmp3 tmp8 = 1e-08 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp5 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tmp16 = 0.0 tmp17 = tmp15 > tmp16 tmp18 = 0.2 tmp19 = tmp15 * tmp18 tmp20 = tl.where(tmp17, tmp15, tmp19) tmp21 = tmp20 > tmp16 tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr0 + x2, tmp20, xmask) tl.store(out_ptr1 + x2, tmp21, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (256, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (32, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64,), (1,), torch.float32) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 buf17 = empty_strided_cuda((64,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_add_div_linalg_vector_norm_mv_0[grid(1)](buf2, primals_4, primals_2, buf0, buf17, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf3 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_div_mv_1[grid(4)](primals_4, buf0, buf2, buf3, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf5 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_add_div_linalg_vector_norm_2[grid(1)](buf3, buf5, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf6 = empty_strided_cuda((), (), torch.float32) triton_per_fused_dot_3[grid(1)](buf5, buf3, buf6, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf3 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_div_4[grid(256)](primals_4, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(primals_1, buf7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (256, 4, 1, 1), (4, 1, 1, 1)) buf9 = buf8 del buf8 buf10 = empty_strided_cuda((256, 4, 1, 1), (4, 1, 1, 
1), torch.float32) buf11 = empty_strided_cuda((256, 4, 1, 1), (4, 1, 1, 1), torch.bool) triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_mul_rsqrt_sub_5[ grid(1024)](buf9, primals_5, primals_6, buf10, buf11, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf12 = torch.ops.aten.set_.source_Tensor(primals_2, buf5) assert_size_stride(buf12, (4,), (1,)) del buf0 buf18 = torch.ops.aten.set_.source_Tensor(primals_3, buf17) assert_size_stride(buf18, (64,), (1,)) del primals_3 return (buf10, buf7, primals_1, primals_2, primals_4, buf2, buf5, buf6, buf7, buf9, reinterpret_tensor(primals_6, (256, 4, 1, 1), (8, 1, 1, 1), 0), buf11) def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, eps=1e-08): super(AdaptiveInstanceNorm2d, self).__init__() self.eps = eps def IN_noWeight(self, x): N, C = x.size(0), x.size(1) mean = x.contiguous().view(N, C, -1).mean(2).contiguous().view(N, C, 1, 1) x = x - mean var = torch.mul(x, x) var = var.contiguous().view(N, C, -1).mean(2).contiguous().view(N, C, 1, 1) var = torch.rsqrt(var + self.eps) x = x * var return x def Apply_style(self, content, style): style = style.contiguous().view([-1, 2, content.size(1), 1, 1]) content = content * style[:, 0] + style[:, 1] return content def forward(self, content, style): normalized_content = self.IN_noWeight(content) stylized_content = self.Apply_style(normalized_content, style) return stylized_content class AdaINConv2dLayerNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', sn=True): super(AdaINConv2dLayerNew, self).__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) self.norm = AdaptiveInstanceNorm2d() if activation == 'relu': 
self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigmoid() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation)) else: self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation) def forward(self, input_0, input_1): primals_2 = self.conv2d.module.weight_u primals_5 = self.conv2d.module.bias primals_3 = self.conv2d.module.weight_v primals_4 = self.conv2d.module.weight_bar primals_1 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
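With the wrapper's primals mapping pairing weight_u with primals_2 (the vector the mv/set_ kernels read and update) and bias with primals_5 (the per-channel add after the convolution), a hypothetical sanity check would compare the compiled module against the eager one. This assumes a CUDA build and is not part of the generated output:

import torch

torch.manual_seed(0)
eager = AdaINConv2dLayer(in_channels=4, out_channels=4, kernel_size=4).cuda()
compiled = AdaINConv2dLayerNew(in_channels=4, out_channels=4, kernel_size=4).cuda()
compiled.load_state_dict(eager.state_dict())   # identical u, v, w_bar, bias
x = torch.rand(256, 4, 4, 4, device='cuda')    # shapes from get_inputs()
style = torch.rand(32, 4, 4, 4, device='cuda')
with torch.no_grad():
    ref = eager(x, style)
    out = compiled(x, style)
assert torch.allclose(ref, out, atol=1e-5)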
LenKerr/Semantic-Colorization-GAN
AdaINConv2dLayer
false
5,521
[ "MIT" ]
1
2ce52406ca6fc92e69692b451b1c9ae66ba3b76f
https://github.com/LenKerr/Semantic-Colorization-GAN/tree/2ce52406ca6fc92e69692b451b1c9ae66ba3b76f
import torch import torch.nn as nn from torch.nn import Parameter def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super().__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, eps=1e-08): super().__init__() self.eps = eps def IN_noWeight(self, x): N, C = x.size(0), x.size(1) mean = x.contiguous().view(N, C, -1).mean(2).contiguous().view(N, C, 1, 1) x = x - mean var = torch.mul(x, x) var = var.contiguous().view(N, C, -1).mean(2).contiguous().view(N, C, 1, 1) var = torch.rsqrt(var + self.eps) x = x * var return x def Apply_style(self, content, style): style = style.contiguous().view([-1, 2, content.size(1), 1, 1]) content = content * style[:, 0] + style[:, 1] return content def forward(self, content, style): normalized_content = self.IN_noWeight(content) stylized_content = self.Apply_style(normalized_content, style) return stylized_content class Model(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', sn=True): super().__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) self.norm = AdaptiveInstanceNorm2d() if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigm # ... truncated (>4000 chars) for memory efficiency
TokenEmbedding
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/wi/cwibqvrnbfx7xhnfzzckhfwxbmmaeepyx4l2irzdxw23feqjr3lp.py # Topologically Sorted Source Nodes: [long], Original ATen: [aten._to_copy] # Source node to ATen node mapping: # long => convert_element_type # Graph fragment: # %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%primals_1, torch.int64), kwargs = {}) triton_poi_fused__to_copy_0 = async_compile.triton('triton_poi_fused__to_copy_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0.to(tl.int64) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_4/inductor_cache/ia/ciafxdq32uqzkpbws275y3bp3mee3juggqv7sqnd2mxb3zrxr2oq.py # Topologically Sorted Source Nodes: [embedding, mul], Original ATen: [aten.embedding, aten.mul] # Source node to ATen node mapping: # embedding => embedding # mul => mul # Graph fragment: # %embedding : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%primals_2, %convert_element_type), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%embedding, 2.0), kwargs = {}) triton_poi_fused_embedding_mul_1 = async_compile.triton('triton_poi_fused_embedding_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_embedding_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_embedding_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4") tmp6 = tl.load(in_ptr1 + (x0 + (4*tmp4)), xmask) tmp7 = 2.0 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64) # Topologically Sorted Source Nodes: [long], Original ATen: [aten._to_copy] stream0 = get_raw_stream(0) triton_poi_fused__to_copy_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [embedding, mul], Original ATen: [aten.embedding, aten.mul] triton_poi_fused_embedding_mul_1.run(buf0, primals_2, buf1, 1024, grid=grid(1024), stream=stream0) del primals_2 return 
(buf1, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from torch import Tensor import torch.nn as nn import torch.quantization import torch.onnx import torch.nn.parallel import torch.utils.data import torch.fx import torch.nn import torch.optim import torch.profiler class TokenEmbedding(nn.Module): def __init__(self, vocab_size: 'int', emb_size): super(TokenEmbedding, self).__init__() self.embedding = nn.Embedding(vocab_size, emb_size) self.emb_size = emb_size def forward(self, tokens: 'Tensor'): return self.embedding(tokens.long()) * math.sqrt(self.emb_size) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'vocab_size': 4, 'emb_size': 4}]
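Because emb_size=4 at trace time, math.sqrt(self.emb_size) is constant-folded into the fused kernel above as the literal 2.0 (tmp7). A minimal eager-mode sketch of the same computation (illustrative):

import math

import torch
import torch.nn as nn

emb = nn.Embedding(4, 4)
tokens = torch.rand(4, 4, 4, 4)      # matches get_inputs()
scale = math.sqrt(4)                 # 2.0, the constant baked in as tmp7
out = emb(tokens.long()) * scale
assert out.shape == (4, 4, 4, 4, 5 - 1)  # trailing embedding dim of size 4, like buf1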
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.quantization import torch.onnx import torch.nn.parallel import torch.utils.data import torch.fx import torch.nn import torch.optim import torch.profiler assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.int64) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_embedding_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (x0 + 4 * tmp4), xmask) tmp7 = 2.0 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64) get_raw_stream(0) triton_poi_fused__to_copy_0[grid(256)](primals_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_embedding_mul_1[grid(1024)](buf0, primals_2, buf1, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, buf0 class TokenEmbeddingNew(nn.Module): def __init__(self, vocab_size: 'int', emb_size): super(TokenEmbeddingNew, self).__init__() self.embedding = nn.Embedding(vocab_size, emb_size) self.emb_size = emb_size def forward(self, input_0): primals_2 = self.embedding.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
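triton_poi_fused_embedding_mul_1 wraps negative indices by adding the vocab size before the device-side bounds assert, mirroring Python-style indexing. A minimal sketch of that wrap logic in plain PyTorch (illustrative; idx is a hypothetical index tensor):

import torch

vocab_size = 4
idx = torch.tensor([-1, 0, 2, -4])
wrapped = torch.where(idx < 0, idx + vocab_size, idx)   # tmp4 in the kernel
assert wrapped.tolist() == [3, 0, 2, 0]
assert bool(((0 <= wrapped) & (wrapped < vocab_size)).all())  # the device_assert condition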
Lezcano/tutorials
TokenEmbedding
false
5,522
[ "BSD-3-Clause" ]
1
24946b2e6d3d825afed6b35c1c4d618a70a88be8
https://github.com/Lezcano/tutorials/tree/24946b2e6d3d825afed6b35c1c4d618a70a88be8
import math import torch from torch import Tensor import torch.nn as nn import torch.quantization import torch.onnx import torch.nn.parallel import torch.utils.data import torch.fx import torch.nn import torch.optim import torch.profiler class Model(nn.Module): def __init__(self, vocab_size: 'int', emb_size): super().__init__() self.embedding = nn.Embedding(vocab_size, emb_size) self.emb_size = emb_size def forward(self, tokens: 'Tensor'): return self.embedding(tokens.long()) * math.sqrt(self.emb_size) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
LayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/tc/ctcoccnojrifwsjhb4gqgfu5kxpt6dvdpv4qwca7cbgn27ktptbk.py # Topologically Sorted Source Nodes: [mean, std, sub, add, ln_out, mul, ln_out_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # ln_out => div # ln_out_1 => add_1 # mean => mean # mul => mul # std => sqrt, var # sub => sub # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [-1]), kwargs = {correction: 1.0, keepdim: True}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {}) triton_poi_fused_add_div_mean_mul_std_sub_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp2 - tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp3 - tmp10 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp10 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp7 - tmp10 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = 3.0 tmp24 = tmp22 / tmp23 tmp25 = libdevice.sqrt(tmp24) tmp26 = 1e-06 tmp27 = tmp25 + tmp26 tmp28 = tmp11 / tmp27 tmp29 = tmp0 * tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + (x2), tmp31, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, std, sub, add, ln_out, mul, ln_out_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mean_mul_std_sub_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0) del primals_2 del primals_3 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class LayerNorm(nn.Module): def __init__(self, d_hid, eps=1e-06): super(LayerNorm, self).__init__() self.gamma = nn.Parameter(torch.ones(d_hid)) self.beta = nn.Parameter(torch.zeros(d_hid)) self.eps = eps def forward(self, x): mean = x.mean(dim=-1, keepdim=True) std = x.std(dim=-1, keepdim=True) ln_out = (x - mean) / (std + self.eps) ln_out = self.gamma * ln_out + self.beta return ln_out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_hid': 4}]
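For d_hid=4 the fused kernel divides the running sum by 4.0 for the mean (tmp9) and the squared deviations by 3.0 (tmp23), i.e. the Bessel-corrected n-1 denominator that x.std(dim=-1) uses by default. A minimal numerical sketch (illustrative):

import torch

x = torch.rand(4, 4, 4, 4)
mean = x.mean(dim=-1, keepdim=True)                  # sum / 4.0
var = ((x - mean) ** 2).sum(-1, keepdim=True) / 3.0  # squared-dev sum / (n - 1)
std = var.sqrt()                                     # libdevice.sqrt in the kernel
assert torch.allclose(std, x.std(dim=-1, keepdim=True), atol=1e-6)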
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp2 - tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp3 - tmp10 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp10 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp7 - tmp10 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = 3.0 tmp24 = tmp22 / tmp23 tmp25 = libdevice.sqrt(tmp24) tmp26 = 1e-06 tmp27 = tmp25 + tmp26 tmp28 = tmp11 / tmp27 tmp29 = tmp0 * tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x2, tmp31, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_2, primals_1, primals_3, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf0, primals_1 class LayerNormNew(nn.Module): def __init__(self, d_hid, eps=1e-06): super(LayerNormNew, self).__init__() self.gamma = nn.Parameter(torch.ones(d_hid)) self.beta = nn.Parameter(torch.zeros(d_hid)) self.eps = eps def forward(self, input_0): primals_2 = self.gamma primals_3 = self.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
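A hypothetical end-to-end check (assumes a CUDA build; not part of the generated output): the compiled LayerNormNew should reproduce the eager LayerNorm on the get_inputs() shape.

import torch

torch.manual_seed(0)
eager = LayerNorm(4).cuda()
compiled = LayerNormNew(4).cuda()
compiled.load_state_dict(eager.state_dict())  # shared gamma, beta
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(x), compiled(x), atol=1e-5)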
Lhx94As/PHO-LID
LayerNorm
false
5,523
[ "MIT" ]
1
44843b25b977dd6e0b77b520dbe3f2ff1ea633cd
https://github.com/Lhx94As/PHO-LID/tree/44843b25b977dd6e0b77b520dbe3f2ff1ea633cd
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, d_hid, eps=1e-06): super().__init__() self.gamma = nn.Parameter(torch.ones(d_hid)) self.beta = nn.Parameter(torch.zeros(d_hid)) self.eps = eps def forward(self, x): mean = x.mean(dim=-1, keepdim=True) std = x.std(dim=-1, keepdim=True) ln_out = (x - mean) / (std + self.eps) ln_out = self.gamma * ln_out + self.beta return ln_out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
ResAdaINConv2dLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/o5/co5utrqsbdulrbygkmgbtnft3s33dxmhlcrl7llsvtgrwilyozcn.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # x => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [1, 1, 1, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 36864 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 6) % 6 x0 = xindex % 6 x2 = (xindex // 36) x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 
tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10, other=0.0) tl.store(out_ptr0 + (x4), tmp11, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/pj/cpj6t7ch5tbjax2t7isz2rrooc27ef6tcwdhv2q3coqhpluuaotk.py # Topologically Sorted Source Nodes: [mv, norm, add, truediv], Original ATen: [aten.mv, aten.linalg_vector_norm, aten.add, aten.div] # Source node to ATen node mapping: # add => add # mv => mul, sum_1 # norm => pow_1, pow_2, sum_2 # truediv => div # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute, %primals_2), kwargs = {}) # %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, 1e-12), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %add), kwargs = {}) triton_per_fused_add_div_linalg_vector_norm_mv_1 = async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_mv_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_mv_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_mv_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 36 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + 
(r0), rmask, other=0.0) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.load(in_ptr0 + (36 + r0), rmask, other=0.0) tmp5 = tl.load(in_ptr1 + (1)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp9 = tl.load(in_ptr0 + (72 + r0), rmask, other=0.0) tmp10 = tl.load(in_ptr1 + (2)) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp14 = tl.load(in_ptr0 + (108 + r0), rmask, other=0.0) tmp15 = tl.load(in_ptr1 + (3)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.where(rmask, tmp20, 0) tmp23 = tl.sum(tmp22, 1)[:, None] tmp24 = libdevice.sqrt(tmp23) tmp25 = 1e-12 tmp26 = tmp24 + tmp25 tmp27 = tmp18 / tmp26 tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp18, rmask) tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp26, None) tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp27, rmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/og/cogzn2aqt2zqefp3tgxavposfk6wg5gaj3zipeiyyawkpex6e6wf.py # Topologically Sorted Source Nodes: [truediv, mv_1], Original ATen: [aten.div, aten.mv] # Source node to ATen node mapping: # mv_1 => mul_1, sum_3 # truediv => div # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %add), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %div), kwargs = {}) # %sum_3 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {}) triton_per_fused_div_mv_2 = async_compile.triton('triton_per_fused_div_mv_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mv_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_mv_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 36 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < 
xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (36*x0)), rmask & xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1), rmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr2 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4 = tmp1 / tmp3 tmp5 = tmp0 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(rmask & xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/qa/cqaed4ios3xqwlv4d3cciikkdz7d73vhwkegurd5cxca3y7htmvg.py # Topologically Sorted Source Nodes: [norm_1, add_1, truediv_1], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div] # Source node to ATen node mapping: # add_1 => add_1 # norm_1 => pow_3, pow_4, sum_4 # truediv_1 => div_1 # Graph fragment: # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 2), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, None), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_4, 0.5), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_4, 1e-12), kwargs = {}) # %div_1 : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, %add_1), kwargs = {}) triton_per_fused_add_div_linalg_vector_norm_3 = async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_3(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) 
tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp6 = 1e-12 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp8, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/c2/cc2arficwjs4sforhl25gdfmb3uzfg7hkw46gq3mxgv57jy52z32.py # Topologically Sorted Source Nodes: [sigma], Original ATen: [aten.dot] # Source node to ATen node mapping: # sigma => mul_3, sum_6 # Graph fragment: # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %sum_3), kwargs = {}) # %sum_6 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%mul_3,), kwargs = {}) triton_per_fused_dot_4 = async_compile.triton('triton_per_fused_dot_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_dot_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_dot_4(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/sb/csbqzjhtj4wya4qmyds7hrs7i55ypvfew7naorg3uvb7egvaiqs2.py # Topologically Sorted Source Nodes: [truediv_2], Original ATen: [aten.div] # Source node to ATen node mapping: # truediv_2 => div_2 # Graph fragment: # %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_4, %expand), kwargs = {}) triton_poi_fused_div_5 = async_compile.triton('triton_poi_fused_div_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 / tmp2 tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/uz/cuzeit4gmkb3dfi574sa6fq5xo72c2sqtw2e25dzqcvoo6tjrh6l.py # Topologically Sorted Source Nodes: [x_1, mean, x_2, mean_2, add_2, var_2, x_3, mul_2, content, x_4, out], Original ATen: [aten.convolution, aten.mean, aten.sub, aten.add, aten.rsqrt, aten.mul, aten.leaky_relu] # Source node to ATen node mapping: # add_2 => add_2 # content => add_3 # mean => mean # mean_2 => mean_1 # mul_2 => mul_6 # out => add_4 # var_2 => rsqrt # x_1 => convolution # x_2 => sub # x_3 => mul_5 # x_4 => gt, mul_7, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %div_2, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view_3, [2]), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution, %view_4), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view_5, [2]), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_6, 1e-08), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %select), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %select_1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_3, 0), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, 0.2), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_3, %mul_7), kwargs = {}) # %add_4 
: [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, %primals_1), kwargs = {}) triton_per_fused_add_convolution_leaky_relu_mean_mul_rsqrt_sub_6 = async_compile.triton('triton_per_fused_add_convolution_leaky_relu_mean_mul_rsqrt_sub_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1024, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_convolution_leaky_relu_mean_mul_rsqrt_sub_6', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1', 'in_out_ptr2', 'in_out_ptr3'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_convolution_leaky_relu_mean_mul_rsqrt_sub_6(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1024 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (x0 + (8*x1)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (4 + x0 + (8*x1)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + (r2 + (16*x3)), xmask, other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = 16.0 tmp8 = tmp6 / tmp7 tmp9 = tmp2 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tmp14 / tmp7 tmp16 = 1e-08 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tmp19 = tmp9 * tmp18 tmp21 = tmp19 * tmp20 tmp23 = tmp21 + tmp22 tmp24 = 0.0 tmp25 = tmp23 > tmp24 tmp26 = 0.2 tmp27 = tmp23 * tmp26 tmp28 = tl.where(tmp25, tmp23, tmp27) tmp30 = tmp28 + tmp29 tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + (x3), tmp8, xmask) tl.debug_barrier() tl.store(in_out_ptr2 + (x3), tmp18, xmask) tl.store(in_out_ptr3 + (r2 + (16*x3)), tmp30, 
xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (256, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (36, ), (1, )) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (32, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((256, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 36864, grid=grid(36864), stream=stream0) buf1 = empty_strided_cuda((36, ), (1, ), torch.float32) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse buf22 = empty_strided_cuda((36, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mv, norm, add, truediv], Original ATen: [aten.mv, aten.linalg_vector_norm, aten.add, aten.div] triton_per_fused_add_div_linalg_vector_norm_mv_1.run(buf3, primals_4, primals_2, buf1, buf22, 1, 36, grid=grid(1), stream=stream0) buf4 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [truediv, mv_1], Original ATen: [aten.div, aten.mv] triton_per_fused_div_mv_2.run(primals_4, buf1, buf3, buf4, 4, 36, grid=grid(4), stream=stream0) buf6 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [norm_1, add_1, truediv_1], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div] triton_per_fused_add_div_linalg_vector_norm_3.run(buf4, buf6, 1, 4, grid=grid(1), stream=stream0) buf7 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [sigma], Original ATen: [aten.dot] triton_per_fused_dot_4.run(buf6, buf4, buf7, 1, 4, grid=grid(1), stream=stream0) del buf4 buf8 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [truediv_2], Original ATen: [aten.div] triton_poi_fused_div_5.run(primals_4, buf7, buf8, 144, grid=grid(144), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf9 = extern_kernels.convolution(buf0, buf8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (256, 4, 4, 4), (64, 16, 4, 1)) buf10 = buf9; del buf9 # reuse buf11 = empty_strided_cuda((256, 4), (4, 1), torch.float32) buf12 = buf11; del buf11 # reuse buf13 = empty_strided_cuda((256, 4), (4, 1), torch.float32) buf14 = reinterpret_tensor(buf13, (256, 4, 1, 1), (4, 1, 1, 1), 0); del buf13 # reuse buf15 = empty_strided_cuda((256, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf16 = buf15; del buf15 # reuse # Topologically Sorted Source Nodes: [x_1, mean, x_2, mean_2, add_2, var_2, x_3, mul_2, content, x_4, out], Original ATen: [aten.convolution, aten.mean, aten.sub, aten.add, aten.rsqrt, aten.mul, aten.leaky_relu] triton_per_fused_add_convolution_leaky_relu_mean_mul_rsqrt_sub_6.run(buf10, buf12, buf14, buf16, primals_5, primals_6, primals_1, 1024, 16, grid=grid(1024), stream=stream0) del primals_1 del primals_5 # Topologically Sorted Source Nodes: [], Original ATen: [] buf17 = torch.ops.aten.set_.source_Tensor(primals_2, buf6) assert_size_stride(buf17, (4, ), (1, )) del buf1 # Topologically Sorted Source Nodes: [truediv], 
Original ATen: [aten.div] buf23 = torch.ops.aten.set_.source_Tensor(primals_3, buf22) assert_size_stride(buf23, (36, ), (1, )) del primals_3 return (buf16, buf8, primals_2, primals_4, primals_6, buf0, buf3, buf6, buf7, buf8, buf10, reinterpret_tensor(buf12, (256, 4, 1, 1), (4, 1, 1, 1), 0), buf14, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((256, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((36, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((32, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
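Kernels 1-4 above jointly implement one spectral-norm power-iteration step (matrix-vector products, l2 normalization, and the final dot product for sigma). A minimal plain-PyTorch sketch of that step, assuming the weight is viewed as a (height, width) matrix as in the SpectralNorm module below; the function name is illustrative, not part of the generated code:

import torch

def power_iteration_step(w2d, u, eps=1e-12):
    # One update of the singular-vector estimates, mirroring
    # SpectralNorm._update_u_v: v <- l2normalize(W^T u), u <- l2normalize(W v).
    v = torch.mv(w2d.t(), u)
    v = v / (v.norm() + eps)
    u = torch.mv(w2d, v)
    u = u / (u.norm() + eps)
    # sigma estimates the largest singular value; the weight is divided by it.
    sigma = torch.dot(u, torch.mv(w2d, v))
    return u, v, sigma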
import torch import torch.nn as nn from torch.nn import Parameter def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, eps=1e-08): super(AdaptiveInstanceNorm2d, self).__init__() self.eps = eps def IN_noWeight(self, x): N, C = x.size(0), x.size(1) mean = x.contiguous().view(N, C, -1).mean(2).contiguous().view(N, C, 1, 1) x = x - mean var = torch.mul(x, x) var = var.contiguous().view(N, C, -1).mean(2).contiguous().view(N, C, 1, 1) var = torch.rsqrt(var + self.eps) x = x * var return x def Apply_style(self, content, style): style = style.contiguous().view([-1, 2, content.size(1), 1, 1]) content = content * style[:, 0] + style[:, 1] return content def forward(self, content, style): normalized_content = self.IN_noWeight(content) stylized_content = self.Apply_style(normalized_content, style) return stylized_content class AdaINConv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', sn=True): super(AdaINConv2dLayer, self).__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) self.norm = AdaptiveInstanceNorm2d() if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigmoid() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation)) else: 
self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation) def forward(self, x, style): x = self.pad(x) x = self.conv2d(x) x = self.norm(x, style) if self.activation: x = self.activation(x) return x class ResAdaINConv2dLayer(nn.Module): def __init__(self, channels, kernel_size=3, stride=1, padding=1, dilation=1, pad_type='zero', activation='lrelu', norm='none', sn=True): super(ResAdaINConv2dLayer, self).__init__() self.conv2d = AdaINConv2dLayer(channels, channels, kernel_size, stride, padding, dilation, pad_type, activation, sn) def forward(self, x, style): residual = x out = self.conv2d(x, style) out = out + residual return out def get_inputs(): return [torch.rand([256, 4, 4, 4]), torch.rand([32, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4}]
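A minimal smoke-test sketch for the residual module above, with shapes taken from get_inputs()/get_init_inputs(); the variable names are illustrative:

import torch

block = ResAdaINConv2dLayer(channels=4)
content = torch.rand(256, 4, 4, 4)   # feature maps
style = torch.rand(32, 4, 4, 4)      # reshaped to (-1, 2, C, 1, 1) inside Apply_style
out = block(content, style)
assert out.shape == content.shape    # the residual connection preserves the shape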
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10, other=0.0) tl.store(out_ptr0 + x4, tmp11, None) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_mv_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): rnumel = 36 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + r0, rmask, other=0.0) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.load(in_ptr0 + (36 + r0), rmask, other=0.0) tmp5 = tl.load(in_ptr1 + 1) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp9 = tl.load(in_ptr0 + (72 + r0), rmask, other=0.0) tmp10 = tl.load(in_ptr1 + 2) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp14 = tl.load(in_ptr0 + (108 + r0), rmask, other=0.0) tmp15 = tl.load(in_ptr1 + 3) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.where(rmask, tmp20, 0) tmp23 = tl.sum(tmp22, 1)[:, None] tmp24 = libdevice.sqrt(tmp23) tmp25 = 1e-12 tmp26 = tmp24 + tmp25 tmp27 = tmp18 / tmp26 tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp18, rmask) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None) tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp27, rmask) @triton.jit def triton_per_fused_div_mv_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 36 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 36 * x0), rmask & xmask, other=0.0) tmp1 = tl.load(in_ptr1 + r1, rmask, eviction_policy='evict_last', other=0.0 ) tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4 = tmp1 / tmp3 tmp5 = tmp0 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(rmask & xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_3(in_ptr0, out_ptr1, xnumel, rnumel, 
XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp6 = 1e-12 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp8, None) @triton.jit def triton_per_fused_dot_4(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) @triton.jit def triton_poi_fused_div_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 / tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_per_fused_add_convolution_leaky_relu_mean_mul_rsqrt_sub_6( in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 1024 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp22 = tl.load(in_ptr1 + (4 + x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr2 + (r2 + 16 * x3), xmask, other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = 16.0 tmp8 = tmp6 / tmp7 tmp9 = tmp2 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tmp14 / tmp7 tmp16 = 1e-08 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tmp19 = tmp9 * tmp18 tmp21 = tmp19 * tmp20 tmp23 = tmp21 + tmp22 tmp24 = 0.0 tmp25 = tmp23 > tmp24 tmp26 = 0.2 tmp27 = tmp23 * tmp26 tmp28 = tl.where(tmp25, tmp23, tmp27) tmp30 = tmp28 + tmp29 tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp8, xmask) tl.debug_barrier() tl.store(in_out_ptr2 + x3, tmp18, xmask) tl.store(in_out_ptr3 + (r2 + 16 * x3), tmp30, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (256, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (36,), (1,)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (32, 4, 4, 4), 
(64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((256, 4, 6, 6), (144, 36, 6, 1), torch. float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(36864)](primals_1, buf0, 36864, XBLOCK=256, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((36,), (1,), torch.float32) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 buf22 = empty_strided_cuda((36,), (1,), torch.float32) triton_per_fused_add_div_linalg_vector_norm_mv_1[grid(1)](buf3, primals_4, primals_2, buf1, buf22, 1, 36, XBLOCK=1, num_warps=2, num_stages=1) buf4 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_div_mv_2[grid(4)](primals_4, buf1, buf3, buf4, 4, 36, XBLOCK=1, num_warps=2, num_stages=1) buf6 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_add_div_linalg_vector_norm_3[grid(1)](buf4, buf6, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf7 = empty_strided_cuda((), (), torch.float32) triton_per_fused_dot_4[grid(1)](buf6, buf4, buf7, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf4 buf8 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) triton_poi_fused_div_5[grid(144)](primals_4, buf7, buf8, 144, XBLOCK=256, num_warps=4, num_stages=1) buf9 = extern_kernels.convolution(buf0, buf8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (256, 4, 4, 4), (64, 16, 4, 1)) buf10 = buf9 del buf9 buf11 = empty_strided_cuda((256, 4), (4, 1), torch.float32) buf12 = buf11 del buf11 buf13 = empty_strided_cuda((256, 4), (4, 1), torch.float32) buf14 = reinterpret_tensor(buf13, (256, 4, 1, 1), (4, 1, 1, 1), 0) del buf13 buf15 = empty_strided_cuda((256, 4, 4, 4), (64, 16, 4, 1), torch. float32) buf16 = buf15 del buf15 triton_per_fused_add_convolution_leaky_relu_mean_mul_rsqrt_sub_6[grid (1024)](buf10, buf12, buf14, buf16, primals_5, primals_6, primals_1, 1024, 16, XBLOCK=32, num_warps=4, num_stages=1) del primals_1 del primals_5 buf17 = torch.ops.aten.set_.source_Tensor(primals_2, buf6) assert_size_stride(buf17, (4,), (1,)) del buf1 buf23 = torch.ops.aten.set_.source_Tensor(primals_3, buf22) assert_size_stride(buf23, (36,), (1,)) del primals_3 return (buf16, buf8, primals_2, primals_4, primals_6, buf0, buf3, buf6, buf7, buf8, buf10, reinterpret_tensor(buf12, (256, 4, 1, 1), (4, 1, 1, 1), 0), buf14) def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v 
= Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, eps=1e-08): super(AdaptiveInstanceNorm2d, self).__init__() self.eps = eps def IN_noWeight(self, x): N, C = x.size(0), x.size(1) mean = x.contiguous().view(N, C, -1).mean(2).contiguous().view(N, C, 1, 1) x = x - mean var = torch.mul(x, x) var = var.contiguous().view(N, C, -1).mean(2).contiguous().view(N, C, 1, 1) var = torch.rsqrt(var + self.eps) x = x * var return x def Apply_style(self, content, style): style = style.contiguous().view([-1, 2, content.size(1), 1, 1]) content = content * style[:, 0] + style[:, 1] return content def forward(self, content, style): normalized_content = self.IN_noWeight(content) stylized_content = self.Apply_style(normalized_content, style) return stylized_content class AdaINConv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', sn=True): super(AdaINConv2dLayer, self).__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) self.norm = AdaptiveInstanceNorm2d() if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigmoid() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation)) else: self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation) def forward(self, x, style): x = self.pad(x) x = self.conv2d(x) x = self.norm(x, style) if self.activation: x = self.activation(x) return x class ResAdaINConv2dLayerNew(nn.Module): def __init__(self, channels, kernel_size=3, stride=1, padding=1, dilation=1, pad_type='zero', activation='lrelu', norm='none', sn=True): super(ResAdaINConv2dLayerNew, self).__init__() self.conv2d = AdaINConv2dLayer(channels, channels, kernel_size, stride, padding, dilation, pad_type, activation, sn) def forward(self, input_0, input_1): primals_2 = self.conv2d.conv2d.module.bias primals_5 = self.conv2d.conv2d.module.weight_u primals_3 = self.conv2d.conv2d.module.weight_v primals_4 = self.conv2d.conv2d.module.weight_bar primals_1 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
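The last fused kernel above (triton_per_fused_add_convolution_leaky_relu_mean_mul_rsqrt_sub_6) folds the AdaIN statistics into the conv epilogue. As a reading aid, here is the same per-(n, c) normalization written out in plain PyTorch (a sketch of the math, not the generated code; eps matches AdaptiveInstanceNorm2d):

import torch

def adain_normalize(x, eps=1e-08):
    # x: (N, C, H, W); zero mean and unit variance per feature map,
    # using the biased variance, exactly as IN_noWeight computes it.
    mean = x.mean(dim=(2, 3), keepdim=True)
    x = x - mean
    var = (x * x).mean(dim=(2, 3), keepdim=True)
    return x * torch.rsqrt(var + eps)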
LenKerr/Semantic-Colorization-GAN
ResAdaINConv2dLayer
false
5524
[ "MIT" ]
1
2ce52406ca6fc92e69692b451b1c9ae66ba3b76f
https://github.com/LenKerr/Semantic-Colorization-GAN/tree/2ce52406ca6fc92e69692b451b1c9ae66ba3b76f
import torch import torch.nn as nn from torch.nn import Parameter def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super().__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, eps=1e-08): super().__init__() self.eps = eps def IN_noWeight(self, x): N, C = x.size(0), x.size(1) mean = x.contiguous().view(N, C, -1).mean(2).contiguous().view(N, C, 1, 1) x = x - mean var = torch.mul(x, x) var = var.contiguous().view(N, C, -1).mean(2).contiguous().view(N, C, 1, 1) var = torch.rsqrt(var + self.eps) x = x * var return x def Apply_style(self, content, style): style = style.contiguous().view([-1, 2, content.size(1), 1, 1]) content = content * style[:, 0] + style[:, 1] return content def forward(self, content, style): normalized_content = self.IN_noWeight(content) stylized_content = self.Apply_style(normalized_content, style) return stylized_content class AdaINConv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', sn=True): super().__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) self.norm = AdaptiveInstanceNorm2d() if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activatio # ... truncated (>4000 chars) for memory efficiency
PositionwiseFeedForward
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = 
xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ji/cji7mw45fbdoanjc5e6qu3e2bf5d6jnnjabskl6onjlk7uv7oqud.py # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # x_2 => add # x_3 => var_mean # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_1 = async_compile.triton('triton_poi_fused_add_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 
tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/xp/cxpe6vtgpqizlfgu2p5hfehvzox7zp73wjlhkybotxh42njvnais.py # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # x_2 => add # x_3 => add_1, add_2, mul, mul_1, rsqrt, sub # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_6), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_7), kwargs = {}) triton_poi_fused_add_native_layer_norm_2 = async_compile.triton('triton_poi_fused_add_native_layer_norm_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, 
eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf6, 256, grid=grid(256), stream=stream0) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_1.run(buf2, primals_1, buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_2.run(buf2, primals_1, buf3, buf4, primals_6, primals_7, buf5, 256, grid=grid(256), stream=stream0) del buf3 del buf4 del primals_7 return (buf5, primals_1, primals_6, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, primals_4, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark 
import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
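The first kernel above fuses the bias add with ReLU and also emits the boolean mask that autograd's threshold_backward consumes. A plain-PyTorch sketch of the same computation (illustrative names; not the generated code):

import torch

def relu_with_backward_mask(x, bias):
    y = torch.relu(x + bias)
    mask = y <= 0  # positions whose incoming gradient is zeroed in backward
    return y, mask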
import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.init class PositionwiseFeedForward(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1 = nn.Linear(d_in, d_hid) self.w_2 = nn.Linear(d_hid, d_in) self.layer_norm = nn.LayerNorm(d_in, eps=1e-06) self.dropout = nn.Dropout(dropout) def forward(self, x): residual = x x = self.w_2(F.relu(self.w_1(x))) x = self.dropout(x) x += residual del residual x = self.layer_norm(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_in': 4, 'd_hid': 4}]
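A minimal usage sketch for the module above, assuming the shapes from get_inputs()/get_init_inputs():

import torch

ffn = PositionwiseFeedForward(d_in=4, d_hid=4)
x = torch.rand(4, 4, 4, 4)
y = ffn(x)
assert y.shape == x.shape  # residual add + LayerNorm keep the input shape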
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) 
assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_3, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_add_native_layer_norm_1[grid(64)](buf2, primals_1, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_2[grid(256)](buf2, primals_1, buf3, buf4, primals_6, primals_7, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 del buf4 del primals_7 return buf5, primals_1, primals_6, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, primals_4, buf6 class PositionwiseFeedForwardNew(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1 = nn.Linear(d_in, d_hid) self.w_2 = nn.Linear(d_hid, d_in) self.layer_norm = nn.LayerNorm(d_in, eps=1e-06) self.dropout = nn.Dropout(dropout) def forward(self, input_0): primals_2 = self.w_1.weight primals_3 = self.w_1.bias primals_4 = self.w_2.weight primals_5 = self.w_2.bias primals_6 = self.layer_norm.weight primals_7 = self.layer_norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
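A hedged parity check between the eager module and the compiled wrapper above (assumes a CUDA device; eval() makes the eager dropout a no-op, matching the generated path, which never applies dropout):

import torch

eager = PositionwiseFeedForward(4, 4).cuda().eval()
fused = PositionwiseFeedForwardNew(4, 4).cuda().eval()
fused.load_state_dict(eager.state_dict())  # share weights so outputs are comparable
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(eager(x), fused(x), rtol=1e-4, atol=1e-4)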
LiJiaBei-7/rivrl
PositionwiseFeedForward
false
5525
[ "Apache-2.0" ]
1
f6078e4826c788704bb338d7a695ef879ff969f4
https://github.com/LiJiaBei-7/rivrl/tree/f6078e4826c788704bb338d7a695ef879ff969f4
import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.init class Model(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1 = nn.Linear(d_in, d_hid) self.w_2 = nn.Linear(d_hid, d_in) self.layer_norm = nn.LayerNorm(d_in, eps=1e-06) self.dropout = nn.Dropout(dropout) def forward(self, x): residual = x x = self.w_2(F.relu(self.w_1(x))) x = self.dropout(x) x += residual del residual x = self.layer_norm(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
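The two layer-norm kernels above split the epilogue (x + residual followed by LayerNorm with eps=1e-06) into a statistics pass and a normalize pass. A single-pass plain-PyTorch sketch of the same math (illustrative, not the generated code):

import torch

def add_layer_norm(x, residual, weight, bias, eps=1e-06):
    h = x + residual
    mean = h.mean(dim=-1, keepdim=True)
    var = h.var(dim=-1, unbiased=False, keepdim=True)  # biased, as in the kernel
    return (h - mean) * torch.rsqrt(var + eps) * weight + bias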
DenseConv2dLayer_5C
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/td/ctdv3m5a33kovvtng5iilth4k6mtnyfcota6hhwoiqm34iumu7wi.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # x => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [1, 1, 1, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x2 = (xindex // 36) x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 
4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/er/cerxfmiafmpnsn44ihnsss2pvd76g6x2i62qavnas5ygqo5mj43d.py # Topologically Sorted Source Nodes: [cat, x_3], Original ATen: [aten.cat, aten.constant_pad_nd] # Source node to ATen node mapping: # cat => cat # x_3 => constant_pad_nd_1 # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %where], 1), kwargs = {}) # %constant_pad_nd_1 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%cat, [1, 1, 1, 1], 0.0), kwargs = {}) triton_poi_fused_cat_constant_pad_nd_1 = async_compile.triton('triton_poi_fused_cat_constant_pad_nd_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_constant_pad_nd_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_constant_pad_nd_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x2 = (xindex // 36) % 8 x3 = (xindex // 288) x6 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = x2 tmp12 = tmp11 >= tmp1 tmp13 = tmp11 < tmp3 tmp14 = tmp13 & tmp10 tmp15 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2) + (64*x3)), tmp14 & xmask, other=0.0) tmp16 = tmp11 >= tmp3 tmp17 = tl.full([1], 8, tl.int64) tmp18 = tmp11 < tmp17 tmp19 = tmp16 & tmp10 tmp20 = tl.load(in_ptr1 + ((-5) + x0 + (4*x1) + (16*((-4) + x2)) + (64*x3)), tmp19 & xmask, other=0.0) tmp21 = 0.0 tmp22 = tmp20 > tmp21 tmp23 = 0.2 tmp24 = tmp20 * tmp23 tmp25 = tl.where(tmp22, tmp20, tmp24) tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp19, tmp25, tmp26) tmp28 = tl.where(tmp13, tmp15, tmp27) tmp29 = 
tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp10, tmp28, tmp29) tl.store(out_ptr0 + (x6), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/s3/cs34ah7ioinuak53u3oyhgsrsqeqfnovsb4djwnzfgkw44v6u547.py # Topologically Sorted Source Nodes: [cat_1, x_6], Original ATen: [aten.cat, aten.constant_pad_nd] # Source node to ATen node mapping: # cat_1 => cat_1 # x_6 => constant_pad_nd_2 # Graph fragment: # %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %where, %where_1], 1), kwargs = {}) # %constant_pad_nd_2 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%cat_1, [1, 1, 1, 1], 0.0), kwargs = {}) triton_poi_fused_cat_constant_pad_nd_2 = async_compile.triton('triton_poi_fused_cat_constant_pad_nd_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_constant_pad_nd_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_constant_pad_nd_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1728 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x2 = (xindex // 36) % 12 x3 = (xindex // 432) x6 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = x2 tmp12 = tmp11 >= tmp1 tmp13 = tmp11 < tmp3 tmp14 = tmp13 & tmp10 tmp15 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2) + (64*x3)), tmp14 & xmask, other=0.0) tmp16 = tmp11 >= tmp3 tmp17 = tl.full([1], 8, tl.int64) tmp18 = tmp11 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tmp19 & tmp10 tmp21 = tl.load(in_ptr1 + ((-5) + x0 + (4*x1) + (16*((-4) + x2)) + (64*x3)), tmp20 & xmask, other=0.0) tmp22 = 0.0 tmp23 = tmp21 > tmp22 tmp24 = 0.2 tmp25 = tmp21 * tmp24 tmp26 = tl.where(tmp23, tmp21, tmp25) tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp20, tmp26, tmp27) tmp29 = tmp11 >= tmp17 tmp30 = tl.full([1], 12, tl.int64) tmp31 = tmp11 < tmp30 tmp32 = tmp29 & tmp10 tmp33 = tl.load(in_ptr2 + ((-5) + x0 
+ (4*x1) + (16*((-8) + x2)) + (64*x3)), tmp32 & xmask, other=0.0) tmp34 = tmp33 > tmp22 tmp35 = tmp33 * tmp24 tmp36 = tl.where(tmp34, tmp33, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp32, tmp36, tmp37) tmp39 = tl.where(tmp19, tmp28, tmp38) tmp40 = tl.where(tmp13, tmp15, tmp39) tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype) tmp42 = tl.where(tmp10, tmp40, tmp41) tl.store(out_ptr0 + (x6), tmp42, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/2h/c2hdtntquyhqkvcjcbctuzny5hhijks5jcqplynkbwfrthtih25e.py # Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat_2 => cat_2 # Graph fragment: # %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %where, %where_1, %where_2], 1), kwargs = {}) triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 16 x0 = xindex % 16 x2 = (xindex // 256) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp9 & xmask, other=0.0) tmp11 = 0.0 tmp12 = tmp10 > tmp11 tmp13 = 0.2 tmp14 = tmp10 * tmp13 tmp15 = tl.where(tmp12, tmp10, tmp14) tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp9, tmp15, tmp16) tmp18 = tmp0 >= tmp7 tmp19 = tl.full([1], 12, tl.int64) tmp20 = tmp0 < tmp19 tmp21 = tmp18 & tmp20 tmp22 = tl.load(in_ptr2 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp21 & xmask, other=0.0) tmp23 = tmp22 > tmp11 tmp24 = tmp22 * tmp13 tmp25 = tl.where(tmp23, tmp22, tmp24) tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp21, 
tmp25, tmp26) tmp28 = tmp0 >= tmp19 tmp29 = tl.full([1], 16, tl.int64) tmp30 = tmp0 < tmp29 tmp31 = tl.load(in_ptr3 + (x0 + (16*((-12) + x1)) + (64*x2)), tmp28 & xmask, other=0.0) tmp32 = tmp31 > tmp11 tmp33 = tmp31 * tmp13 tmp34 = tl.where(tmp32, tmp31, tmp33) tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype) tmp36 = tl.where(tmp28, tmp34, tmp35) tmp37 = tl.where(tmp21, tmp27, tmp36) tmp38 = tl.where(tmp9, tmp17, tmp37) tmp39 = tl.where(tmp4, tmp5, tmp38) tl.store(out_ptr0 + (x3), tmp39, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/rc/crcdywpxyr2fxsveen5yolsghlkstdms4uvwp7zkfwhcmkjmpxpg.py # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # x_9 => constant_pad_nd_3 # Graph fragment: # %constant_pad_nd_3 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%cat_2, [1, 1, 1, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_4 = async_compile.triton('triton_poi_fused_constant_pad_nd_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x2 = (xindex // 36) x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/4p/c4pwo5hnpxyletkwji2bfgdsgviax75psfwuih5qzyaah2skdcdn.py # Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat_3 => cat_3 # Graph fragment: # %cat_3 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %where, %where_1, %where_2, %where_3], 1), kwargs = {}) triton_poi_fused_cat_5 = 
async_compile.triton('triton_poi_fused_cat_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1280 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 20 x0 = xindex % 16 x2 = (xindex // 320) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp9 & xmask, other=0.0) tmp11 = 0.0 tmp12 = tmp10 > tmp11 tmp13 = 0.2 tmp14 = tmp10 * tmp13 tmp15 = tl.where(tmp12, tmp10, tmp14) tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp9, tmp15, tmp16) tmp18 = tmp0 >= tmp7 tmp19 = tl.full([1], 12, tl.int64) tmp20 = tmp0 < tmp19 tmp21 = tmp18 & tmp20 tmp22 = tl.load(in_ptr2 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp21 & xmask, other=0.0) tmp23 = tmp22 > tmp11 tmp24 = tmp22 * tmp13 tmp25 = tl.where(tmp23, tmp22, tmp24) tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp21, tmp25, tmp26) tmp28 = tmp0 >= tmp19 tmp29 = tl.full([1], 16, tl.int64) tmp30 = tmp0 < tmp29 tmp31 = tmp28 & tmp30 tmp32 = tl.load(in_ptr3 + (x0 + (16*((-12) + x1)) + (64*x2)), tmp31 & xmask, other=0.0) tmp33 = tmp32 > tmp11 tmp34 = tmp32 * tmp13 tmp35 = tl.where(tmp33, tmp32, tmp34) tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype) tmp37 = tl.where(tmp31, tmp35, tmp36) tmp38 = tmp0 >= tmp29 tmp39 = tl.full([1], 20, tl.int64) tmp40 = tmp0 < tmp39 tmp41 = tl.load(in_ptr4 + (x0 + (16*((-16) + x1)) + (64*x2)), tmp38 & xmask, other=0.0) tmp42 = tmp41 > tmp11 tmp43 = tmp41 * tmp13 tmp44 = tl.where(tmp42, tmp41, tmp43) tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype) tmp46 = tl.where(tmp38, tmp44, tmp45) tmp47 = tl.where(tmp31, tmp37, tmp46) tmp48 = tl.where(tmp21, tmp27, tmp47) tmp49 = tl.where(tmp9, tmp17, tmp48) tmp50 = tl.where(tmp4, tmp5, tmp49) 
tl.store(out_ptr0 + (x3), tmp50, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/s7/cs7remm7dxallug7mpmvc7novi2rckl5n32tcwbq5syx4cb4lqk6.py # Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # x_12 => constant_pad_nd_4 # Graph fragment: # %constant_pad_nd_4 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%cat_3, [1, 1, 1, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_6 = async_compile.triton('triton_poi_fused_constant_pad_nd_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2880 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x2 = (xindex // 36) x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/fz/cfzioooyib2f5ephr2m4wpireief66w5kf7iwt5ay6lq5g5n4k5g.py # Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # x_14 => gt_4, mul_4, where_4 # Graph fragment: # %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_4, 0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, 0.2), kwargs = {}) # %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_4, %mul_4), kwargs = {}) # %gt_5 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_4, 0), kwargs = {}) triton_poi_fused_leaky_relu_leaky_relu_backward_7 = async_compile.triton('triton_poi_fused_leaky_relu_leaky_relu_backward_7', ''' import triton 
import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_leaky_relu_backward_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_7(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = tmp5 > tmp1 tl.store(in_out_ptr0 + (x0), tmp5, xmask) tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/is/cisodw5gxdmlgapnrqpu7bcsl5srnastcmg2c73tt3lqfqu2bmk6.py # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # x_11 => gt_3, mul_3, where_3 # Graph fragment: # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.2), kwargs = {}) # %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {}) # %gt_6 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_3, 0), kwargs = {}) triton_poi_fused_leaky_relu_leaky_relu_backward_8 = async_compile.triton('triton_poi_fused_leaky_relu_leaky_relu_backward_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_leaky_relu_backward_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = tmp5 > tmp1 tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_4, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_5, (4, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_6, (4, 20, 3, 3), (180, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 8, 6, 6), (288, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [cat, x_3], Original ATen: [aten.cat, aten.constant_pad_nd] triton_poi_fused_cat_constant_pad_nd_1.run(primals_1, buf1, buf2, 1152, grid=grid(1152), stream=stream0) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 12, 6, 6), (432, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_1, x_6], Original ATen: [aten.cat, aten.constant_pad_nd] triton_poi_fused_cat_constant_pad_nd_2.run(primals_1, buf1, buf3, buf4, 1728, grid=grid(1728), stream=stream0) # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1)) buf6 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat] 
triton_poi_fused_cat_3.run(primals_1, buf1, buf3, buf5, buf6, 1024, grid=grid(1024), stream=stream0) buf7 = empty_strided_cuda((4, 16, 6, 6), (576, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.constant_pad_nd] triton_poi_fused_constant_pad_nd_4.run(buf6, buf7, 2304, grid=grid(2304), stream=stream0) del buf6 # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1)) buf9 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat] triton_poi_fused_cat_5.run(primals_1, buf1, buf3, buf5, buf8, buf9, 1280, grid=grid(1280), stream=stream0) del primals_1 buf10 = empty_strided_cuda((4, 20, 6, 6), (720, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.constant_pad_nd] triton_poi_fused_constant_pad_nd_6.run(buf9, buf10, 2880, grid=grid(2880), stream=stream0) del buf9 # Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.convolution] buf11 = extern_kernels.convolution(buf10, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 4, 4, 4), (64, 16, 4, 1)) buf12 = buf11; del buf11 # reuse buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_leaky_relu_leaky_relu_backward_7.run(buf12, buf13, 256, grid=grid(256), stream=stream0) buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_leaky_relu_leaky_relu_backward_8.run(buf8, buf14, 256, grid=grid(256), stream=stream0) del buf8 buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_leaky_relu_leaky_relu_backward_8.run(buf5, buf15, 256, grid=grid(256), stream=stream0) del buf5 buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_leaky_relu_leaky_relu_backward_8.run(buf3, buf16, 256, grid=grid(256), stream=stream0) del buf3 buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_leaky_relu_leaky_relu_backward_8.run(buf1, buf17, 256, grid=grid(256), stream=stream0) del buf1 return (buf12, primals_2, primals_3, primals_4, primals_5, primals_6, buf0, buf2, buf4, buf7, buf10, buf13, buf14, buf15, buf16, buf17, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), 
device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 20, 3, 3), (180, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
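The generated kernels above inline LeakyReLU with negative_slope 0.2 throughout and, in triton_poi_fused_leaky_relu_leaky_relu_backward_7/_8, also emit the boolean masks that the backward pass reuses. A minimal eager-mode sketch of the per-element computation (an illustration added here, not part of the generated file):

import torch

x = torch.tensor([-1.0, 0.0, 0.5])
y = torch.where(x > 0, x, 0.2 * x)  # fused forward: tensor([-0.2000, 0.0000, 0.5000])
mask = y > 0                        # stored for leaky_relu_backward: [False, False, True]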
import torch import torch.nn as nn from torch.nn import Parameter def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) self.beta = Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class Conv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', norm= 'none', sn=True): super(Conv2dLayer, self).__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) if norm == 'bn': self.norm = nn.BatchNorm2d(out_channels) elif norm == 'in': self.norm = nn.InstanceNorm2d(out_channels) elif norm == 'ln': self.norm = LayerNorm(out_channels) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigmoid() elif activation == 'none': 
self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation, bias=False)) else: self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation, bias=False) def forward(self, x): x = self.pad(x) x = self.conv2d(x) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x class DenseConv2dLayer_5C(nn.Module): def __init__(self, in_channels, latent_channels, kernel_size=3, stride= 1, padding=1, dilation=1, pad_type='zero', activation='lrelu', norm ='none', sn=False): super(DenseConv2dLayer_5C, self).__init__() self.conv1 = Conv2dLayer(in_channels, latent_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) self.conv2 = Conv2dLayer(in_channels + latent_channels, latent_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) self.conv3 = Conv2dLayer(in_channels + latent_channels * 2, latent_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) self.conv4 = Conv2dLayer(in_channels + latent_channels * 3, latent_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) self.conv5 = Conv2dLayer(in_channels + latent_channels * 4, in_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) def forward(self, x): x1 = self.conv1(x) x2 = self.conv2(torch.cat((x, x1), 1)) x3 = self.conv3(torch.cat((x, x1, x2), 1)) x4 = self.conv4(torch.cat((x, x1, x2, x3), 1)) x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1)) return x5 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'latent_channels': 4}]
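A minimal usage sketch for the module defined above (an addition for illustration, assuming the class is importable as written): conv5 projects the 20-channel concatenation (in_channels + 4 * latent_channels) back to in_channels, and the 3x3 kernels with padding=1 preserve the spatial size, so the dense block is shape-preserving.

import torch

block = DenseConv2dLayer_5C(in_channels=4, latent_channels=4)  # matches get_init_inputs()
x = torch.rand(4, 4, 4, 4)                                     # matches get_inputs()
y = block(x)
assert y.shape == x.shape  # channel growth 4 -> 8 -> 12 -> 16 -> 20, then back to 4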
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_cat_constant_pad_nd_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 % 8 x3 = xindex // 288 x6 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = x2 tmp13 = tmp11 < tmp3 tmp14 = tmp13 & tmp10 tmp15 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2 + 64 * x3), tmp14 & xmask, other=0.0) tmp16 = tmp11 >= tmp3 tl.full([1], 8, tl.int64) tmp19 = tmp16 & tmp10 tmp20 = tl.load(in_ptr1 + (-5 + x0 + 4 * x1 + 16 * (-4 + x2) + 64 * x3), tmp19 & xmask, other=0.0) tmp21 = 0.0 tmp22 = tmp20 > tmp21 tmp23 = 0.2 tmp24 = tmp20 * tmp23 tmp25 = tl.where(tmp22, tmp20, tmp24) tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp19, tmp25, tmp26) tmp28 = tl.where(tmp13, tmp15, tmp27) tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp10, tmp28, tmp29) tl.store(out_ptr0 + x6, tmp30, xmask) @triton.jit def triton_poi_fused_cat_constant_pad_nd_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1728 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 % 12 x3 = xindex // 432 x6 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = x2 tmp13 = tmp11 < tmp3 tmp14 = tmp13 & tmp10 tmp15 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2 + 64 * x3), tmp14 & xmask, other=0.0) tmp16 = tmp11 >= tmp3 tmp17 = tl.full([1], 8, tl.int64) tmp18 = tmp11 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tmp19 & tmp10 tmp21 = tl.load(in_ptr1 + (-5 + x0 + 4 * x1 + 16 * (-4 + x2) + 64 * x3), tmp20 & xmask, other=0.0) tmp22 = 0.0 tmp23 = tmp21 > tmp22 tmp24 = 0.2 tmp25 = tmp21 * tmp24 tmp26 = tl.where(tmp23, tmp21, tmp25) tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp20, tmp26, tmp27) tmp29 = tmp11 >= tmp17 tl.full([1], 12, tl.int64) tmp32 = tmp29 & tmp10 tmp33 = tl.load(in_ptr2 + (-5 + x0 + 4 * x1 + 16 * (-8 + x2) + 64 * x3), tmp32 & 
xmask, other=0.0) tmp34 = tmp33 > tmp22 tmp35 = tmp33 * tmp24 tmp36 = tl.where(tmp34, tmp33, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp32, tmp36, tmp37) tmp39 = tl.where(tmp19, tmp28, tmp38) tmp40 = tl.where(tmp13, tmp15, tmp39) tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype) tmp42 = tl.where(tmp10, tmp40, tmp41) tl.store(out_ptr0 + x6, tmp42, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 16 x0 = xindex % 16 x2 = xindex // 256 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp9 & xmask, other=0.0) tmp11 = 0.0 tmp12 = tmp10 > tmp11 tmp13 = 0.2 tmp14 = tmp10 * tmp13 tmp15 = tl.where(tmp12, tmp10, tmp14) tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp9, tmp15, tmp16) tmp18 = tmp0 >= tmp7 tmp19 = tl.full([1], 12, tl.int64) tmp20 = tmp0 < tmp19 tmp21 = tmp18 & tmp20 tmp22 = tl.load(in_ptr2 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp21 & xmask, other=0.0) tmp23 = tmp22 > tmp11 tmp24 = tmp22 * tmp13 tmp25 = tl.where(tmp23, tmp22, tmp24) tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp21, tmp25, tmp26) tmp28 = tmp0 >= tmp19 tl.full([1], 16, tl.int64) tmp31 = tl.load(in_ptr3 + (x0 + 16 * (-12 + x1) + 64 * x2), tmp28 & xmask, other=0.0) tmp32 = tmp31 > tmp11 tmp33 = tmp31 * tmp13 tmp34 = tl.where(tmp32, tmp31, tmp33) tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype) tmp36 = tl.where(tmp28, tmp34, tmp35) tmp37 = tl.where(tmp21, tmp27, tmp36) tmp38 = tl.where(tmp9, tmp17, tmp37) tmp39 = tl.where(tmp4, tmp5, tmp38) tl.store(out_ptr0 + x3, tmp39, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1280 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 20 x0 = xindex % 16 x2 = xindex // 320 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp9 & xmask, other=0.0) tmp11 = 0.0 tmp12 = tmp10 > tmp11 tmp13 = 0.2 tmp14 = tmp10 * tmp13 tmp15 = tl.where(tmp12, tmp10, tmp14) tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp9, tmp15, tmp16) tmp18 = tmp0 >= tmp7 tmp19 = tl.full([1], 12, tl.int64) 
tmp20 = tmp0 < tmp19 tmp21 = tmp18 & tmp20 tmp22 = tl.load(in_ptr2 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp21 & xmask, other=0.0) tmp23 = tmp22 > tmp11 tmp24 = tmp22 * tmp13 tmp25 = tl.where(tmp23, tmp22, tmp24) tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp21, tmp25, tmp26) tmp28 = tmp0 >= tmp19 tmp29 = tl.full([1], 16, tl.int64) tmp30 = tmp0 < tmp29 tmp31 = tmp28 & tmp30 tmp32 = tl.load(in_ptr3 + (x0 + 16 * (-12 + x1) + 64 * x2), tmp31 & xmask, other=0.0) tmp33 = tmp32 > tmp11 tmp34 = tmp32 * tmp13 tmp35 = tl.where(tmp33, tmp32, tmp34) tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype) tmp37 = tl.where(tmp31, tmp35, tmp36) tmp38 = tmp0 >= tmp29 tl.full([1], 20, tl.int64) tmp41 = tl.load(in_ptr4 + (x0 + 16 * (-16 + x1) + 64 * x2), tmp38 & xmask, other=0.0) tmp42 = tmp41 > tmp11 tmp43 = tmp41 * tmp13 tmp44 = tl.where(tmp42, tmp41, tmp43) tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype) tmp46 = tl.where(tmp38, tmp44, tmp45) tmp47 = tl.where(tmp31, tmp37, tmp46) tmp48 = tl.where(tmp21, tmp27, tmp47) tmp49 = tl.where(tmp9, tmp17, tmp48) tmp50 = tl.where(tmp4, tmp5, tmp49) tl.store(out_ptr0 + x3, tmp50, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2880 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_7(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = tmp5 > tmp1 tl.store(in_out_ptr0 + x0, tmp5, xmask) tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = tmp5 > tmp1 tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_4, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_5, (4, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_6, (4, 20, 3, 3), (180, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(576)](primals_1, buf0, 576, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, 
bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 8, 6, 6), (288, 36, 6, 1), torch.float32) triton_poi_fused_cat_constant_pad_nd_1[grid(1152)](primals_1, buf1, buf2, 1152, XBLOCK=128, num_warps=4, num_stages=1) buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 12, 6, 6), (432, 36, 6, 1), torch.float32 ) triton_poi_fused_cat_constant_pad_nd_2[grid(1728)](primals_1, buf1, buf3, buf4, 1728, XBLOCK=128, num_warps=4, num_stages=1) buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1)) buf6 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32 ) triton_poi_fused_cat_3[grid(1024)](primals_1, buf1, buf3, buf5, buf6, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 16, 6, 6), (576, 36, 6, 1), torch.float32 ) triton_poi_fused_constant_pad_nd_4[grid(2304)](buf6, buf7, 2304, XBLOCK=256, num_warps=4, num_stages=1) del buf6 buf8 = extern_kernels.convolution(buf7, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1)) buf9 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.float32 ) triton_poi_fused_cat_5[grid(1280)](primals_1, buf1, buf3, buf5, buf8, buf9, 1280, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf10 = empty_strided_cuda((4, 20, 6, 6), (720, 36, 6, 1), torch. float32) triton_poi_fused_constant_pad_nd_6[grid(2880)](buf9, buf10, 2880, XBLOCK=128, num_warps=4, num_stages=1) del buf9 buf11 = extern_kernels.convolution(buf10, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 4, 4, 4), (64, 16, 4, 1)) buf12 = buf11 del buf11 buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_leaky_relu_backward_7[grid(256)](buf12, buf13, 256, XBLOCK=128, num_warps=4, num_stages=1) buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_leaky_relu_backward_8[grid(256)](buf8, buf14, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf8 buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_leaky_relu_backward_8[grid(256)](buf5, buf15, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf5 buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_leaky_relu_backward_8[grid(256)](buf3, buf16, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_leaky_relu_backward_8[grid(256)](buf1, buf17, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 return (buf12, primals_2, primals_3, primals_4, primals_5, primals_6, buf0, buf2, buf4, buf7, buf10, buf13, buf14, buf15, buf16, buf17) def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) 
self.beta = Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class Conv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', norm= 'none', sn=True): super(Conv2dLayer, self).__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) if norm == 'bn': self.norm = nn.BatchNorm2d(out_channels) elif norm == 'in': self.norm = nn.InstanceNorm2d(out_channels) elif norm == 'ln': self.norm = LayerNorm(out_channels) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigmoid() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation, bias=False)) else: self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation, bias=False) def forward(self, x): x = self.pad(x) x = self.conv2d(x) if 
self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x class DenseConv2dLayer_5CNew(nn.Module): def __init__(self, in_channels, latent_channels, kernel_size=3, stride= 1, padding=1, dilation=1, pad_type='zero', activation='lrelu', norm ='none', sn=False): super(DenseConv2dLayer_5CNew, self).__init__() self.conv1 = Conv2dLayer(in_channels, latent_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) self.conv2 = Conv2dLayer(in_channels + latent_channels, latent_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) self.conv3 = Conv2dLayer(in_channels + latent_channels * 2, latent_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) self.conv4 = Conv2dLayer(in_channels + latent_channels * 3, latent_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) self.conv5 = Conv2dLayer(in_channels + latent_channels * 4, in_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) def forward(self, input_0): primals_2 = self.conv1.conv2d.weight primals_3 = self.conv2.conv2d.weight primals_4 = self.conv3.conv2d.weight primals_5 = self.conv4.conv2d.weight primals_6 = self.conv5.conv2d.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
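A hedged equivalence check (an addition, assuming a CUDA device and that both class definitions above live in the same module): DenseConv2dLayer_5CNew keeps the same parameter names (convN.conv2d.weight), so the compiled call() path should agree with the eager block once the weights are shared.

import torch

eager = DenseConv2dLayer_5C(in_channels=4, latent_channels=4).cuda()
fused = DenseConv2dLayer_5CNew(in_channels=4, latent_channels=4).cuda()
fused.load_state_dict(eager.state_dict())  # identical state_dict keys
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(fused(x), eager(x), rtol=1e-4, atol=1e-4)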
LenKerr/Semantic-Colorization-GAN
DenseConv2dLayer_5C
false
5526
[ "MIT" ]
1
2ce52406ca6fc92e69692b451b1c9ae66ba3b76f
https://github.com/LenKerr/Semantic-Colorization-GAN/tree/2ce52406ca6fc92e69692b451b1c9ae66ba3b76f
import torch import torch.nn as nn from torch.nn import Parameter def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super().__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) self.beta = Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super().__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class Conv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', norm= 'none', sn=True): super().__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) if norm == 'bn': self.norm = nn.BatchNorm2d(out_channels) elif norm == 'in': self.norm = nn.InstanceNorm2d(out_channels) elif norm == 'ln': self.norm = LayerNorm(out_channels) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.acti # ... truncated (>4000 chars) for memory efficiency
DotProductAttention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/fz/cfzmg4qtw6jgry4nhlwopodzjz62ll3n3ykfox77hwd2crdnlh2w.py # Topologically Sorted Source Nodes: [att_weights], Original ATen: [aten._softmax] # Source node to ATen node mapping: # att_weights => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 0.5), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py # Topologically Sorted Source Nodes: [att_weights], Original ATen: [aten._softmax] # Source node to ATen node mapping: # att_weights => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, 
eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm] extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [att_weights], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [att_weights], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [att_weights, att_out], Original ATen: [aten._softmax, aten.bmm] extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 del buf2 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.nn as nn


class DotProductAttention(nn.Module):

    def __init__(self, k_dim, dropout=0.1):
        super(DotProductAttention, self).__init__()
        self.scale = 1.0 / math.sqrt(k_dim)
        self.softmax = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(dropout)

    def forward(self, q, k, v, mask=None):
        """
        :param q: (bz, q_len, q_dim)
        :param k: (bz, k_len, k_dim)
        :param v: (bz, v_len, v_dim)  k_len == v_len, v_dim == q_dim
        :param mask: (bz, k_len)  padded positions are 0
        :return: (bz, q_len, v_dim)
        """
        att_score = torch.bmm(q, k.transpose(1, 2)).mul(self.scale)
        if mask is not None:
            att_score.masked_fill_(~mask[:, None, :], -1000000000.0)
        att_weights = self.softmax(att_score)
        if self.training:
            att_weights = self.dropout(att_weights)
        att_out = torch.bmm(att_weights, v)
        return att_out


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'k_dim': 4}]
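A minimal usage sketch of the module above, assuming the class is in scope; the batch size, sequence lengths, and mask pattern are made up for illustration. The boolean mask marks real tokens with True, so `~mask` selects the padded key positions that receive the large negative fill before the softmax:

import torch

attn = DotProductAttention(k_dim=4, dropout=0.1)
attn.eval()  # disable dropout so the call is deterministic

q = torch.rand(2, 3, 4)  # (bz, q_len, q_dim)
k = torch.rand(2, 5, 4)  # (bz, k_len, k_dim)
v = torch.rand(2, 5, 4)  # (bz, v_len, v_dim)

# Boolean padding mask over keys: True = real token, False = padding.
mask = torch.tensor([[True, True, True, False, False],
                     [True, True, True, True, False]])

out = attn(q, k, v, mask=mask)
print(out.shape)  # torch.Size([2, 3, 4])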
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = 0.5
    tmp16 = tmp14 * tmp15
    tmp17 = tl_math.exp(tmp16)
    tl.store(out_ptr0 + x2, tmp17, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf2 = buf0
        del buf0
        triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf3 = buf1
        del buf1
        extern_kernels.bmm(buf2, arg2_1, out=buf3)
        del arg2_1
        del buf2
    return buf3,


class DotProductAttentionNew(nn.Module):

    def __init__(self, k_dim, dropout=0.1):
        super(DotProductAttentionNew, self).__init__()
        self.scale = 1.0 / math.sqrt(k_dim)
        self.softmax = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(dropout)

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
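Note how the two fused kernels split the softmax: triton_poi_fused__softmax_0 subtracts the row max and applies the 1/sqrt(k_dim) scale (0.5 here) inside the exponent, and triton_poi_fused__softmax_1 normalizes by the row sum. A short sketch of why that is algebraically identical to softmax(scale * x); the shape and values are arbitrary:

import torch

x = torch.randn(4, 4)
scale = 0.5  # 1 / sqrt(k_dim) for k_dim = 4, as in the kernels above

# Subtract the row max, then scale and exponentiate (kernel 0), then
# divide by the row sum (kernel 1). The exp(-scale * m) factors cancel,
# so the result equals softmax(scale * x) but never overflows.
m = x.max(dim=-1, keepdim=True).values
e = torch.exp((x - m) * scale)
stable = e / e.sum(dim=-1, keepdim=True)

print(torch.allclose(stable, torch.softmax(scale * x, dim=-1)))  # True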
LindgeW/DomainAdaption4DependencyParsing
DotProductAttention
false
5527
[ "Apache-2.0" ]
1
5de136a37d8fe730e4235ed95bf923763fe21ea6
https://github.com/LindgeW/DomainAdaption4DependencyParsing/tree/5de136a37d8fe730e4235ed95bf923763fe21ea6
import math
import torch
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, k_dim, dropout=0.1):
        super().__init__()
        self.scale = 1.0 / math.sqrt(k_dim)
        self.softmax = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(dropout)

    def forward(self, q, k, v, mask=None):
        """
        :param q: (bz, q_len, q_dim)
        :param k: (bz, k_len, k_dim)
        :param v: (bz, v_len, v_dim)  k_len == v_len, v_dim == q_dim
        :param mask: (bz, k_len)  padded positions are 0
        :return: (bz, q_len, v_dim)
        """
        att_score = torch.bmm(q, k.transpose(1, 2)).mul(self.scale)
        if mask is not None:
            att_score.masked_fill_(~mask[:, None, :], -1000000000.0)
        att_weights = self.softmax(att_score)
        if self.training:
            att_weights = self.dropout(att_weights)
        att_out = torch.bmm(att_weights, v)
        return att_out


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [4]
ResidualDenseBlock_5C
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/td/ctdv3m5a33kovvtng5iilth4k6mtnyfcota6hhwoiqm34iumu7wi.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # x => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [1, 1, 1, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x2 = (xindex // 36) x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 
4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/er/cerxfmiafmpnsn44ihnsss2pvd76g6x2i62qavnas5ygqo5mj43d.py # Topologically Sorted Source Nodes: [cat, x_3], Original ATen: [aten.cat, aten.constant_pad_nd] # Source node to ATen node mapping: # cat => cat # x_3 => constant_pad_nd_1 # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %where], 1), kwargs = {}) # %constant_pad_nd_1 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%cat, [1, 1, 1, 1], 0.0), kwargs = {}) triton_poi_fused_cat_constant_pad_nd_1 = async_compile.triton('triton_poi_fused_cat_constant_pad_nd_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_constant_pad_nd_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_constant_pad_nd_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x2 = (xindex // 36) % 8 x3 = (xindex // 288) x6 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = x2 tmp12 = tmp11 >= tmp1 tmp13 = tmp11 < tmp3 tmp14 = tmp13 & tmp10 tmp15 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2) + (64*x3)), tmp14 & xmask, other=0.0) tmp16 = tmp11 >= tmp3 tmp17 = tl.full([1], 8, tl.int64) tmp18 = tmp11 < tmp17 tmp19 = tmp16 & tmp10 tmp20 = tl.load(in_ptr1 + ((-5) + x0 + (4*x1) + (16*((-4) + x2)) + (64*x3)), tmp19 & xmask, other=0.0) tmp21 = 0.0 tmp22 = tmp20 > tmp21 tmp23 = 0.2 tmp24 = tmp20 * tmp23 tmp25 = tl.where(tmp22, tmp20, tmp24) tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp19, tmp25, tmp26) tmp28 = tl.where(tmp13, tmp15, tmp27) tmp29 = 
tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp10, tmp28, tmp29) tl.store(out_ptr0 + (x6), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/s3/cs34ah7ioinuak53u3oyhgsrsqeqfnovsb4djwnzfgkw44v6u547.py # Topologically Sorted Source Nodes: [cat_1, x_6], Original ATen: [aten.cat, aten.constant_pad_nd] # Source node to ATen node mapping: # cat_1 => cat_1 # x_6 => constant_pad_nd_2 # Graph fragment: # %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %where, %where_1], 1), kwargs = {}) # %constant_pad_nd_2 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%cat_1, [1, 1, 1, 1], 0.0), kwargs = {}) triton_poi_fused_cat_constant_pad_nd_2 = async_compile.triton('triton_poi_fused_cat_constant_pad_nd_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_constant_pad_nd_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_constant_pad_nd_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1728 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x2 = (xindex // 36) % 12 x3 = (xindex // 432) x6 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = x2 tmp12 = tmp11 >= tmp1 tmp13 = tmp11 < tmp3 tmp14 = tmp13 & tmp10 tmp15 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2) + (64*x3)), tmp14 & xmask, other=0.0) tmp16 = tmp11 >= tmp3 tmp17 = tl.full([1], 8, tl.int64) tmp18 = tmp11 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tmp19 & tmp10 tmp21 = tl.load(in_ptr1 + ((-5) + x0 + (4*x1) + (16*((-4) + x2)) + (64*x3)), tmp20 & xmask, other=0.0) tmp22 = 0.0 tmp23 = tmp21 > tmp22 tmp24 = 0.2 tmp25 = tmp21 * tmp24 tmp26 = tl.where(tmp23, tmp21, tmp25) tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp20, tmp26, tmp27) tmp29 = tmp11 >= tmp17 tmp30 = tl.full([1], 12, tl.int64) tmp31 = tmp11 < tmp30 tmp32 = tmp29 & tmp10 tmp33 = tl.load(in_ptr2 + ((-5) + x0 
+ (4*x1) + (16*((-8) + x2)) + (64*x3)), tmp32 & xmask, other=0.0) tmp34 = tmp33 > tmp22 tmp35 = tmp33 * tmp24 tmp36 = tl.where(tmp34, tmp33, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp32, tmp36, tmp37) tmp39 = tl.where(tmp19, tmp28, tmp38) tmp40 = tl.where(tmp13, tmp15, tmp39) tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype) tmp42 = tl.where(tmp10, tmp40, tmp41) tl.store(out_ptr0 + (x6), tmp42, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/2h/c2hdtntquyhqkvcjcbctuzny5hhijks5jcqplynkbwfrthtih25e.py # Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat_2 => cat_2 # Graph fragment: # %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %where, %where_1, %where_2], 1), kwargs = {}) triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 16 x0 = xindex % 16 x2 = (xindex // 256) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp9 & xmask, other=0.0) tmp11 = 0.0 tmp12 = tmp10 > tmp11 tmp13 = 0.2 tmp14 = tmp10 * tmp13 tmp15 = tl.where(tmp12, tmp10, tmp14) tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp9, tmp15, tmp16) tmp18 = tmp0 >= tmp7 tmp19 = tl.full([1], 12, tl.int64) tmp20 = tmp0 < tmp19 tmp21 = tmp18 & tmp20 tmp22 = tl.load(in_ptr2 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp21 & xmask, other=0.0) tmp23 = tmp22 > tmp11 tmp24 = tmp22 * tmp13 tmp25 = tl.where(tmp23, tmp22, tmp24) tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp21, 
tmp25, tmp26) tmp28 = tmp0 >= tmp19 tmp29 = tl.full([1], 16, tl.int64) tmp30 = tmp0 < tmp29 tmp31 = tl.load(in_ptr3 + (x0 + (16*((-12) + x1)) + (64*x2)), tmp28 & xmask, other=0.0) tmp32 = tmp31 > tmp11 tmp33 = tmp31 * tmp13 tmp34 = tl.where(tmp32, tmp31, tmp33) tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype) tmp36 = tl.where(tmp28, tmp34, tmp35) tmp37 = tl.where(tmp21, tmp27, tmp36) tmp38 = tl.where(tmp9, tmp17, tmp37) tmp39 = tl.where(tmp4, tmp5, tmp38) tl.store(out_ptr0 + (x3), tmp39, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/rc/crcdywpxyr2fxsveen5yolsghlkstdms4uvwp7zkfwhcmkjmpxpg.py # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # x_9 => constant_pad_nd_3 # Graph fragment: # %constant_pad_nd_3 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%cat_2, [1, 1, 1, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_4 = async_compile.triton('triton_poi_fused_constant_pad_nd_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x2 = (xindex // 36) x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/4p/c4pwo5hnpxyletkwji2bfgdsgviax75psfwuih5qzyaah2skdcdn.py # Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat_3 => cat_3 # Graph fragment: # %cat_3 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %where, %where_1, %where_2, %where_3], 1), kwargs = {}) triton_poi_fused_cat_5 = 
async_compile.triton('triton_poi_fused_cat_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1280 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 20 x0 = xindex % 16 x2 = (xindex // 320) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp9 & xmask, other=0.0) tmp11 = 0.0 tmp12 = tmp10 > tmp11 tmp13 = 0.2 tmp14 = tmp10 * tmp13 tmp15 = tl.where(tmp12, tmp10, tmp14) tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp9, tmp15, tmp16) tmp18 = tmp0 >= tmp7 tmp19 = tl.full([1], 12, tl.int64) tmp20 = tmp0 < tmp19 tmp21 = tmp18 & tmp20 tmp22 = tl.load(in_ptr2 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp21 & xmask, other=0.0) tmp23 = tmp22 > tmp11 tmp24 = tmp22 * tmp13 tmp25 = tl.where(tmp23, tmp22, tmp24) tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp21, tmp25, tmp26) tmp28 = tmp0 >= tmp19 tmp29 = tl.full([1], 16, tl.int64) tmp30 = tmp0 < tmp29 tmp31 = tmp28 & tmp30 tmp32 = tl.load(in_ptr3 + (x0 + (16*((-12) + x1)) + (64*x2)), tmp31 & xmask, other=0.0) tmp33 = tmp32 > tmp11 tmp34 = tmp32 * tmp13 tmp35 = tl.where(tmp33, tmp32, tmp34) tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype) tmp37 = tl.where(tmp31, tmp35, tmp36) tmp38 = tmp0 >= tmp29 tmp39 = tl.full([1], 20, tl.int64) tmp40 = tmp0 < tmp39 tmp41 = tl.load(in_ptr4 + (x0 + (16*((-16) + x1)) + (64*x2)), tmp38 & xmask, other=0.0) tmp42 = tmp41 > tmp11 tmp43 = tmp41 * tmp13 tmp44 = tl.where(tmp42, tmp41, tmp43) tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype) tmp46 = tl.where(tmp38, tmp44, tmp45) tmp47 = tl.where(tmp31, tmp37, tmp46) tmp48 = tl.where(tmp21, tmp27, tmp47) tmp49 = tl.where(tmp9, tmp17, tmp48) tmp50 = tl.where(tmp4, tmp5, tmp49) 
tl.store(out_ptr0 + (x3), tmp50, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/s7/cs7remm7dxallug7mpmvc7novi2rckl5n32tcwbq5syx4cb4lqk6.py # Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # x_12 => constant_pad_nd_4 # Graph fragment: # %constant_pad_nd_4 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%cat_3, [1, 1, 1, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_6 = async_compile.triton('triton_poi_fused_constant_pad_nd_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2880 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x2 = (xindex // 36) x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/yx/cyxxe53fwtxbbz4ado2vqkuowx34tuo7up564iwpnh2sggpenycv.py # Topologically Sorted Source Nodes: [x_14, mul, x5], Original ATen: [aten.leaky_relu, aten.mul, aten.add, aten.leaky_relu_backward] # Source node to ATen node mapping: # mul => mul_5 # x5 => add # x_14 => gt_4, mul_4, where_4 # Graph fragment: # %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_4, 0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, 0.2), kwargs = {}) # %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_4, %mul_4), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_4, 0.1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = 
(%mul_5, %primals_1), kwargs = {}) # %gt_5 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_4, 0), kwargs = {}) triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_7 = async_compile.triton('triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp8 = tl.load(in_ptr1 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = 0.1 tmp7 = tmp5 * tmp6 tmp9 = tmp7 + tmp8 tmp10 = tmp5 > tmp1 tl.store(out_ptr0 + (x0), tmp9, xmask) tl.store(out_ptr1 + (x0), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/is/cisodw5gxdmlgapnrqpu7bcsl5srnastcmg2c73tt3lqfqu2bmk6.py # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # x_11 => gt_3, mul_3, where_3 # Graph fragment: # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.2), kwargs = {}) # %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {}) # %gt_6 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_3, 0), kwargs = {}) triton_poi_fused_leaky_relu_leaky_relu_backward_8 = async_compile.triton('triton_poi_fused_leaky_relu_leaky_relu_backward_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, 
TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_leaky_relu_backward_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = tmp5 > tmp1 tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_4, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_5, (4, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_6, (4, 20, 3, 3), (180, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 8, 6, 6), (288, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [cat, x_3], Original ATen: [aten.cat, aten.constant_pad_nd] triton_poi_fused_cat_constant_pad_nd_1.run(primals_1, buf1, buf2, 1152, grid=grid(1152), stream=stream0) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 12, 6, 6), (432, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_1, x_6], Original ATen: [aten.cat, aten.constant_pad_nd] triton_poi_fused_cat_constant_pad_nd_2.run(primals_1, buf1, buf3, buf4, 1728, grid=grid(1728), stream=stream0) # Topologically Sorted 
Source Nodes: [x_7], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1)) buf6 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat] triton_poi_fused_cat_3.run(primals_1, buf1, buf3, buf5, buf6, 1024, grid=grid(1024), stream=stream0) buf7 = empty_strided_cuda((4, 16, 6, 6), (576, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.constant_pad_nd] triton_poi_fused_constant_pad_nd_4.run(buf6, buf7, 2304, grid=grid(2304), stream=stream0) del buf6 # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1)) buf9 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat] triton_poi_fused_cat_5.run(primals_1, buf1, buf3, buf5, buf8, buf9, 1280, grid=grid(1280), stream=stream0) buf10 = empty_strided_cuda((4, 20, 6, 6), (720, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.constant_pad_nd] triton_poi_fused_constant_pad_nd_6.run(buf9, buf10, 2880, grid=grid(2880), stream=stream0) del buf9 # Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.convolution] buf11 = extern_kernels.convolution(buf10, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 4, 4, 4), (64, 16, 4, 1)) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_14, mul, x5], Original ATen: [aten.leaky_relu, aten.mul, aten.add, aten.leaky_relu_backward] triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_7.run(buf11, primals_1, buf12, buf13, 256, grid=grid(256), stream=stream0) del buf11 del primals_1 buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_leaky_relu_leaky_relu_backward_8.run(buf8, buf14, 256, grid=grid(256), stream=stream0) del buf8 buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_leaky_relu_leaky_relu_backward_8.run(buf5, buf15, 256, grid=grid(256), stream=stream0) del buf5 buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_leaky_relu_leaky_relu_backward_8.run(buf3, buf16, 256, grid=grid(256), stream=stream0) del buf3 buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_leaky_relu_leaky_relu_backward_8.run(buf1, buf17, 256, grid=grid(256), stream=stream0) del buf1 return (buf12, primals_2, primals_3, primals_4, primals_5, primals_6, buf0, buf2, buf4, buf7, buf10, buf13, 
buf14, buf15, buf16, buf17, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 20, 3, 3), (180, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
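The final fused kernel above (triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_7) folds the block's epilogue into one pass: LeakyReLU(0.2) on the last convolution output, the 0.1 residual scaling, the skip connection, and the boolean activation mask saved for the backward pass. A plain-PyTorch restatement of that epilogue, with made-up tensors standing in for the kernel's inputs:

import torch
import torch.nn.functional as F

conv_out = torch.randn(4, 4, 4, 4)  # in_ptr0: output of the fifth conv
residual = torch.randn(4, 4, 4, 4)  # in_ptr1: the block input (primals_1)

act = F.leaky_relu(conv_out, negative_slope=0.2)  # tmp5 in the kernel
out = 0.1 * act + residual                        # tmp9: the returned buf12
backward_mask = act > 0                           # tmp10: the boolean buf13
print(out.shape, backward_mask.dtype)             # torch.Size([4, 4, 4, 4]) torch.bool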
import torch import torch.nn as nn from torch.nn import Parameter def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) self.beta = Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class Conv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', norm= 'none', sn=True): super(Conv2dLayer, self).__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) if norm == 'bn': self.norm = nn.BatchNorm2d(out_channels) elif norm == 'in': self.norm = nn.InstanceNorm2d(out_channels) elif norm == 'ln': self.norm = LayerNorm(out_channels) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigmoid() elif activation == 'none': 
self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation, bias=False)) else: self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation, bias=False) def forward(self, x): x = self.pad(x) x = self.conv2d(x) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x class ResidualDenseBlock_5C(nn.Module): def __init__(self, in_channels, latent_channels, kernel_size=3, stride= 1, padding=1, dilation=1, pad_type='zero', activation='lrelu', norm ='none', sn=False): super(ResidualDenseBlock_5C, self).__init__() self.conv1 = Conv2dLayer(in_channels, latent_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) self.conv2 = Conv2dLayer(in_channels + latent_channels, latent_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) self.conv3 = Conv2dLayer(in_channels + latent_channels * 2, latent_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) self.conv4 = Conv2dLayer(in_channels + latent_channels * 3, latent_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) self.conv5 = Conv2dLayer(in_channels + latent_channels * 4, in_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) def forward(self, x): residual = x x1 = self.conv1(x) x2 = self.conv2(torch.cat((x, x1), 1)) x3 = self.conv3(torch.cat((x, x1, x2), 1)) x4 = self.conv4(torch.cat((x, x1, x2, x3), 1)) x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1)) x5 = 0.1 * x5 + residual return x5 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'latent_channels': 4}]
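# Minimal usage sketch (an illustrative addition, not part of the original
# repository code). It wires get_init_inputs()/get_inputs() into the eager
# module the way a test harness would; with the default sn=False everything
# here is CPU-safe. The residual connection means conv5 maps back to
# in_channels, so the block preserves the input shape.
def _demo_residual_dense_block():
    init_args, init_kwargs = get_init_inputs()
    block = ResidualDenseBlock_5C(*init_args, **init_kwargs)
    out = block(*get_inputs())
    assert out.shape == (4, 4, 4, 4)  # same shape as the input tensor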
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_cat_constant_pad_nd_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 % 8 x3 = xindex // 288 x6 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = x2 tmp13 = tmp11 < tmp3 tmp14 = tmp13 & tmp10 tmp15 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2 + 64 * x3), tmp14 & xmask, other=0.0) tmp16 = tmp11 >= tmp3 tl.full([1], 8, tl.int64) tmp19 = tmp16 & tmp10 tmp20 = tl.load(in_ptr1 + (-5 + x0 + 4 * x1 + 16 * (-4 + x2) + 64 * x3), tmp19 & xmask, other=0.0) tmp21 = 0.0 tmp22 = tmp20 > tmp21 tmp23 = 0.2 tmp24 = tmp20 * tmp23 tmp25 = tl.where(tmp22, tmp20, tmp24) tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp19, tmp25, tmp26) tmp28 = tl.where(tmp13, tmp15, tmp27) tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp10, tmp28, tmp29) tl.store(out_ptr0 + x6, tmp30, xmask) @triton.jit def triton_poi_fused_cat_constant_pad_nd_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1728 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 % 12 x3 = xindex // 432 x6 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = x2 tmp13 = tmp11 < tmp3 tmp14 = tmp13 & tmp10 tmp15 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2 + 64 * x3), tmp14 & xmask, other=0.0) tmp16 = tmp11 >= tmp3 tmp17 = tl.full([1], 8, tl.int64) tmp18 = tmp11 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tmp19 & tmp10 tmp21 = tl.load(in_ptr1 + (-5 + x0 + 4 * x1 + 16 * (-4 + x2) + 64 * x3), tmp20 & xmask, other=0.0) tmp22 = 0.0 tmp23 = tmp21 > tmp22 tmp24 = 0.2 tmp25 = tmp21 * tmp24 tmp26 = tl.where(tmp23, tmp21, tmp25) tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp20, tmp26, tmp27) tmp29 = tmp11 >= tmp17 tl.full([1], 12, tl.int64) tmp32 = tmp29 & tmp10 tmp33 = tl.load(in_ptr2 + (-5 + x0 + 4 * x1 + 16 * (-8 + x2) + 64 * x3), tmp32 & 
xmask, other=0.0) tmp34 = tmp33 > tmp22 tmp35 = tmp33 * tmp24 tmp36 = tl.where(tmp34, tmp33, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp32, tmp36, tmp37) tmp39 = tl.where(tmp19, tmp28, tmp38) tmp40 = tl.where(tmp13, tmp15, tmp39) tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype) tmp42 = tl.where(tmp10, tmp40, tmp41) tl.store(out_ptr0 + x6, tmp42, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 16 x0 = xindex % 16 x2 = xindex // 256 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp9 & xmask, other=0.0) tmp11 = 0.0 tmp12 = tmp10 > tmp11 tmp13 = 0.2 tmp14 = tmp10 * tmp13 tmp15 = tl.where(tmp12, tmp10, tmp14) tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp9, tmp15, tmp16) tmp18 = tmp0 >= tmp7 tmp19 = tl.full([1], 12, tl.int64) tmp20 = tmp0 < tmp19 tmp21 = tmp18 & tmp20 tmp22 = tl.load(in_ptr2 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp21 & xmask, other=0.0) tmp23 = tmp22 > tmp11 tmp24 = tmp22 * tmp13 tmp25 = tl.where(tmp23, tmp22, tmp24) tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp21, tmp25, tmp26) tmp28 = tmp0 >= tmp19 tl.full([1], 16, tl.int64) tmp31 = tl.load(in_ptr3 + (x0 + 16 * (-12 + x1) + 64 * x2), tmp28 & xmask, other=0.0) tmp32 = tmp31 > tmp11 tmp33 = tmp31 * tmp13 tmp34 = tl.where(tmp32, tmp31, tmp33) tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype) tmp36 = tl.where(tmp28, tmp34, tmp35) tmp37 = tl.where(tmp21, tmp27, tmp36) tmp38 = tl.where(tmp9, tmp17, tmp37) tmp39 = tl.where(tmp4, tmp5, tmp38) tl.store(out_ptr0 + x3, tmp39, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1280 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 20 x0 = xindex % 16 x2 = xindex // 320 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp9 & xmask, other=0.0) tmp11 = 0.0 tmp12 = tmp10 > tmp11 tmp13 = 0.2 tmp14 = tmp10 * tmp13 tmp15 = tl.where(tmp12, tmp10, tmp14) tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp9, tmp15, tmp16) tmp18 = tmp0 >= tmp7 tmp19 = tl.full([1], 12, tl.int64) 
tmp20 = tmp0 < tmp19 tmp21 = tmp18 & tmp20 tmp22 = tl.load(in_ptr2 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp21 & xmask, other=0.0) tmp23 = tmp22 > tmp11 tmp24 = tmp22 * tmp13 tmp25 = tl.where(tmp23, tmp22, tmp24) tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp21, tmp25, tmp26) tmp28 = tmp0 >= tmp19 tmp29 = tl.full([1], 16, tl.int64) tmp30 = tmp0 < tmp29 tmp31 = tmp28 & tmp30 tmp32 = tl.load(in_ptr3 + (x0 + 16 * (-12 + x1) + 64 * x2), tmp31 & xmask, other=0.0) tmp33 = tmp32 > tmp11 tmp34 = tmp32 * tmp13 tmp35 = tl.where(tmp33, tmp32, tmp34) tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype) tmp37 = tl.where(tmp31, tmp35, tmp36) tmp38 = tmp0 >= tmp29 tl.full([1], 20, tl.int64) tmp41 = tl.load(in_ptr4 + (x0 + 16 * (-16 + x1) + 64 * x2), tmp38 & xmask, other=0.0) tmp42 = tmp41 > tmp11 tmp43 = tmp41 * tmp13 tmp44 = tl.where(tmp42, tmp41, tmp43) tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype) tmp46 = tl.where(tmp38, tmp44, tmp45) tmp47 = tl.where(tmp31, tmp37, tmp46) tmp48 = tl.where(tmp21, tmp27, tmp47) tmp49 = tl.where(tmp9, tmp17, tmp48) tmp50 = tl.where(tmp4, tmp5, tmp49) tl.store(out_ptr0 + x3, tmp50, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2880 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp8 = tl.load(in_ptr1 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = 0.1 tmp7 = tmp5 * tmp6 tmp9 = tmp7 + tmp8 tmp10 = tmp5 > tmp1 tl.store(out_ptr0 + x0, tmp9, xmask) tl.store(out_ptr1 + x0, tmp10, xmask) @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = tmp5 > tmp1 tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_4, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_5, (4, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_6, (4, 20, 3, 3), (180, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(576)](primals_1, buf0, 576, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, 
stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 8, 6, 6), (288, 36, 6, 1), torch.float32) triton_poi_fused_cat_constant_pad_nd_1[grid(1152)](primals_1, buf1, buf2, 1152, XBLOCK=128, num_warps=4, num_stages=1) buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 12, 6, 6), (432, 36, 6, 1), torch.float32 ) triton_poi_fused_cat_constant_pad_nd_2[grid(1728)](primals_1, buf1, buf3, buf4, 1728, XBLOCK=128, num_warps=4, num_stages=1) buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1)) buf6 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32 ) triton_poi_fused_cat_3[grid(1024)](primals_1, buf1, buf3, buf5, buf6, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 16, 6, 6), (576, 36, 6, 1), torch.float32 ) triton_poi_fused_constant_pad_nd_4[grid(2304)](buf6, buf7, 2304, XBLOCK=256, num_warps=4, num_stages=1) del buf6 buf8 = extern_kernels.convolution(buf7, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1)) buf9 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch.float32 ) triton_poi_fused_cat_5[grid(1280)](primals_1, buf1, buf3, buf5, buf8, buf9, 1280, XBLOCK=128, num_warps=4, num_stages=1) buf10 = empty_strided_cuda((4, 20, 6, 6), (720, 36, 6, 1), torch. 
float32) triton_poi_fused_constant_pad_nd_6[grid(2880)](buf9, buf10, 2880, XBLOCK=128, num_warps=4, num_stages=1) del buf9 buf11 = extern_kernels.convolution(buf10, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 4, 4, 4), (64, 16, 4, 1)) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_7[grid(256)]( buf11, primals_1, buf12, buf13, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf11 del primals_1 buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_leaky_relu_backward_8[grid(256)](buf8, buf14, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf8 buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_leaky_relu_backward_8[grid(256)](buf5, buf15, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf5 buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_leaky_relu_backward_8[grid(256)](buf3, buf16, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_leaky_relu_backward_8[grid(256)](buf1, buf17, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 return (buf12, primals_2, primals_3, primals_4, primals_5, primals_6, buf0, buf2, buf4, buf7, buf10, buf13, buf14, buf15, buf16, buf17) def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) self.beta = Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) 
v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class Conv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', norm= 'none', sn=True): super(Conv2dLayer, self).__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) if norm == 'bn': self.norm = nn.BatchNorm2d(out_channels) elif norm == 'in': self.norm = nn.InstanceNorm2d(out_channels) elif norm == 'ln': self.norm = LayerNorm(out_channels) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigmoid() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation, bias=False)) else: self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation, bias=False) def forward(self, x): x = self.pad(x) x = self.conv2d(x) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x class ResidualDenseBlock_5CNew(nn.Module): def __init__(self, in_channels, latent_channels, kernel_size=3, stride= 1, padding=1, dilation=1, pad_type='zero', activation='lrelu', norm ='none', sn=False): super(ResidualDenseBlock_5CNew, self).__init__() self.conv1 = Conv2dLayer(in_channels, latent_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) self.conv2 = Conv2dLayer(in_channels + latent_channels, latent_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) self.conv3 = Conv2dLayer(in_channels + latent_channels * 2, latent_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) self.conv4 = Conv2dLayer(in_channels + latent_channels * 3, latent_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) self.conv5 = Conv2dLayer(in_channels + latent_channels * 4, in_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) def forward(self, input_0): primals_2 = self.conv1.conv2d.weight primals_3 = self.conv2.conv2d.weight primals_4 = self.conv3.conv2d.weight primals_5 = self.conv4.conv2d.weight primals_6 = self.conv5.conv2d.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
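# Hypothetical parity check (an illustration, not part of the generated file;
# it assumes a CUDA device, since call() allocates CUDA buffers). Rather than
# importing the eager ResidualDenseBlock_5C from the record's pytorch_code,
# it recomputes the dense-block reference with the same Conv2dLayer
# submodules, so the sketch stays self-contained in this module.
def _check_parity():
    torch.manual_seed(0)
    fused = ResidualDenseBlock_5CNew(4, 4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    # eager recomputation of the dense block using the same weights
    x1 = fused.conv1(x)
    x2 = fused.conv2(torch.cat((x, x1), 1))
    x3 = fused.conv3(torch.cat((x, x1, x2), 1))
    x4 = fused.conv4(torch.cat((x, x1, x2, x3), 1))
    x5 = fused.conv5(torch.cat((x, x1, x2, x3, x4), 1))
    ref = 0.1 * x5 + x
    torch.testing.assert_close(fused(x), ref, rtol=1e-4, atol=1e-4)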
LenKerr/Semantic-Colorization-GAN
ResidualDenseBlock_5C
false
5528
[ "MIT" ]
1
2ce52406ca6fc92e69692b451b1c9ae66ba3b76f
https://github.com/LenKerr/Semantic-Colorization-GAN/tree/2ce52406ca6fc92e69692b451b1c9ae66ba3b76f
import torch import torch.nn as nn from torch.nn import Parameter def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super().__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) self.beta = Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super().__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class Conv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', norm= 'none', sn=True): super().__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) if norm == 'bn': self.norm = nn.BatchNorm2d(out_channels) elif norm == 'in': self.norm = nn.InstanceNorm2d(out_channels) elif norm == 'ln': self.norm = LayerNorm(out_channels) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.acti # ... truncated (>4000 chars) for memory efficiency
TransposeAdaINConv2dLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ks/cksi7p5iq6amvatj4ylxlyaufmiucpgysxv6irpv4u55ro6d7sin.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten._unsafe_index] # Source node to ATen node mapping: # x => _unsafe_index # Graph fragment: # %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {}) triton_poi_fused__unsafe_index_0 = async_compile.triton('triton_poi_fused__unsafe_index_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 8) % 8 x0 = xindex % 8 x2 = (xindex // 64) x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = 
tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + (4*tmp4) + (16*x2)), None, eviction_policy='evict_last') tl.store(out_ptr0 + (x4), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ya/cya2grnbhraytq2wzrkx5sd2ottwnbrnd5ohd2xstcxyryneuc25.py # Topologically Sorted Source Nodes: [mv, norm, add, truediv], Original ATen: [aten.mv, aten.linalg_vector_norm, aten.add, aten.div] # Source node to ATen node mapping: # add => add_4 # mv => mul_4, sum_1 # norm => pow_1, pow_2, sum_2 # truediv => div # Graph fragment: # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute, %primals_2), kwargs = {}) # %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [1]), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, 1e-12), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %add_4), kwargs = {}) triton_per_fused_add_div_linalg_vector_norm_mv_1 = async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_mv_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_mv_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_mv_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 
+ (r0), None) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.load(in_ptr0 + (64 + r0), None) tmp5 = tl.load(in_ptr1 + (1)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp9 = tl.load(in_ptr0 + (128 + r0), None) tmp10 = tl.load(in_ptr1 + (2)) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp14 = tl.load(in_ptr0 + (192 + r0), None) tmp15 = tl.load(in_ptr1 + (3)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.sum(tmp20, 1)[:, None] tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-12 tmp25 = tmp23 + tmp24 tmp26 = tmp18 / tmp25 tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp18, None) tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp25, None) tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp26, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/qi/cqiozgecuvqtnurxrggbllqpuci3n65ycew5qi5gdqg44ypxzegy.py # Topologically Sorted Source Nodes: [truediv, mv_1], Original ATen: [aten.div, aten.mv] # Source node to ATen node mapping: # mv_1 => mul_5, sum_3 # truediv => div # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %add_4), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %div), kwargs = {}) # %sum_3 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_5, [1]), kwargs = {}) triton_per_fused_div_mv_2 = async_compile.triton('triton_per_fused_div_mv_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mv_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_mv_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = 
tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4 = tmp1 / tmp3 tmp5 = tmp0 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/qa/cqaed4ios3xqwlv4d3cciikkdz7d73vhwkegurd5cxca3y7htmvg.py # Topologically Sorted Source Nodes: [norm_1, add_1, truediv_1], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div] # Source node to ATen node mapping: # add_1 => add_5 # norm_1 => pow_3, pow_4, sum_4 # truediv_1 => div_1 # Graph fragment: # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 2), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, None), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_4, 0.5), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_4, 1e-12), kwargs = {}) # %div_1 : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, %add_5), kwargs = {}) triton_per_fused_add_div_linalg_vector_norm_3 = async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_3(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp6 = 
1e-12 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp8, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/c2/cc2arficwjs4sforhl25gdfmb3uzfg7hkw46gq3mxgv57jy52z32.py # Topologically Sorted Source Nodes: [sigma], Original ATen: [aten.dot] # Source node to ATen node mapping: # sigma => mul_7, sum_6 # Graph fragment: # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %sum_3), kwargs = {}) # %sum_6 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%mul_7,), kwargs = {}) triton_per_fused_dot_4 = async_compile.triton('triton_per_fused_dot_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_dot_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_dot_4(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/kw/ckwzptlssdpmtxi6pt23ik63xcuqar2giaakuqtgizxlg5weagc7.py # Topologically Sorted Source Nodes: [truediv_2], Original ATen: [aten.div] # Source node to ATen node mapping: # truediv_2 => div_2 # Graph fragment: # %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_4, %expand), kwargs = {}) triton_poi_fused_div_5 = async_compile.triton('triton_poi_fused_div_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, 
TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 / tmp2 tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/bv/cbv6jakt3kcc7zdwd3bg7driglzwnrjamfcojzypctox67qmiqqu.py # Topologically Sorted Source Nodes: [x_2, mean, x_3, mean_2, add_2, var_2, x_4, mul_2, content, x_5], Original ATen: [aten.convolution, aten.mean, aten.sub, aten.add, aten.rsqrt, aten.mul, aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # add_2 => add_6 # content => add_7 # mean => mean # mean_2 => mean_1 # mul_2 => mul_10 # var_2 => rsqrt # x_2 => convolution # x_3 => sub # x_4 => mul_9 # x_5 => gt, mul_11, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %div_2, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view_3, [2]), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution, %view_4), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view_5, [2]), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_6, 1e-08), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_6,), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_9, %select), kwargs = {}) # %add_7 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_10, %select_1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_7, 0), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_7, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_7, %mul_11), kwargs = {}) # %gt_1 : [num_users=1] = 
call_function[target=torch.ops.aten.gt.Scalar](args = (%where, 0), kwargs = {}) triton_per_fused_add_convolution_leaky_relu_leaky_relu_backward_mean_mul_rsqrt_sub_6 = async_compile.triton('triton_per_fused_add_convolution_leaky_relu_leaky_relu_backward_mean_mul_rsqrt_sub_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1024, 32], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*i1', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_convolution_leaky_relu_leaky_relu_backward_mean_mul_rsqrt_sub_6', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1', 'in_out_ptr2', 'in_out_ptr3'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_convolution_leaky_relu_leaky_relu_backward_mean_mul_rsqrt_sub_6(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1024 rnumel = 25 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r2 = rindex x3 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (r2 + (25*x3)), rmask & xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (x0 + (8*x1)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (4 + x0 + (8*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(rmask & xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = 25.0 tmp8 = tmp6 / tmp7 tmp9 = tmp2 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(rmask & xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tmp14 / tmp7 tmp16 = 1e-08 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tmp19 = tmp9 * tmp18 tmp21 = tmp19 * tmp20 tmp23 = tmp21 + tmp22 tmp24 = 0.0 tmp25 = tmp23 > tmp24 tmp26 = 0.2 tmp27 = tmp23 * tmp26 tmp28 = tl.where(tmp25, tmp23, tmp27) tmp29 = tmp28 > tmp24 tl.store(in_out_ptr0 + (r2 + (25*x3)), tmp2, rmask & xmask) tl.debug_barrier() tl.store(in_out_ptr1 + (x3), tmp8, xmask) tl.debug_barrier() tl.store(in_out_ptr2 + (x3), tmp18, xmask) tl.store(in_out_ptr3 + (r2 + (25*x3)), tmp28, rmask & 
xmask) tl.store(out_ptr0 + (r2 + (25*x3)), tmp29, rmask & xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (256, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (64, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (32, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((256, 4, 8, 8), (256, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten._unsafe_index] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_index_0.run(primals_1, buf0, 65536, grid=grid(65536), stream=stream0) del primals_1 buf1 = empty_strided_cuda((64, ), (1, ), torch.float32) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse buf23 = empty_strided_cuda((64, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mv, norm, add, truediv], Original ATen: [aten.mv, aten.linalg_vector_norm, aten.add, aten.div] triton_per_fused_add_div_linalg_vector_norm_mv_1.run(buf3, primals_4, primals_2, buf1, buf23, 1, 64, grid=grid(1), stream=stream0) buf4 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [truediv, mv_1], Original ATen: [aten.div, aten.mv] triton_per_fused_div_mv_2.run(primals_4, buf1, buf3, buf4, 4, 64, grid=grid(4), stream=stream0) buf6 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [norm_1, add_1, truediv_1], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div] triton_per_fused_add_div_linalg_vector_norm_3.run(buf4, buf6, 1, 4, grid=grid(1), stream=stream0) buf7 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [sigma], Original ATen: [aten.dot] triton_per_fused_dot_4.run(buf6, buf4, buf7, 1, 4, grid=grid(1), stream=stream0) del buf4 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [truediv_2], Original ATen: [aten.div] triton_poi_fused_div_5.run(primals_4, buf7, buf8, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf9 = extern_kernels.convolution(buf0, buf8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (256, 4, 5, 5), (100, 25, 5, 1)) buf10 = buf9; del buf9 # reuse buf11 = empty_strided_cuda((256, 4), (4, 1), torch.float32) buf12 = buf11; del buf11 # reuse buf13 = empty_strided_cuda((256, 4), (4, 1), torch.float32) buf14 = reinterpret_tensor(buf13, (256, 4, 1, 1), (4, 1, 1, 1), 0); del buf13 # reuse buf15 = empty_strided_cuda((256, 4, 5, 5), (100, 25, 5, 1), torch.float32) buf16 = buf15; del buf15 # reuse buf17 = empty_strided_cuda((256, 4, 5, 5), (100, 25, 5, 1), torch.bool) # Topologically Sorted Source Nodes: [x_2, mean, x_3, mean_2, add_2, var_2, x_4, mul_2, content, x_5], Original ATen: [aten.convolution, aten.mean, aten.sub, aten.add, aten.rsqrt, aten.mul, aten.leaky_relu, aten.leaky_relu_backward] triton_per_fused_add_convolution_leaky_relu_leaky_relu_backward_mean_mul_rsqrt_sub_6.run(buf10, buf12, buf14, buf16, primals_5, primals_6, buf17, 1024, 25, grid=grid(1024), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [], Original ATen: 
[] buf18 = torch.ops.aten.set_.source_Tensor(primals_2, buf6) assert_size_stride(buf18, (4, ), (1, )) del buf1 # Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div] buf24 = torch.ops.aten.set_.source_Tensor(primals_3, buf23) assert_size_stride(buf24, (64, ), (1, )) del primals_3 return (buf16, buf8, primals_2, primals_4, buf0, buf3, buf6, buf7, buf8, buf10, reinterpret_tensor(buf12, (256, 4, 1, 1), (4, 1, 1, 1), 0), buf14, reinterpret_tensor(primals_6, (256, 4, 1, 1), (8, 1, 1, 1), 0), buf17, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((256, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((32, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
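# Observation on the generated call() above (a reading of the emitted code,
# offered as an assumption rather than documented semantics): the two
# torch.ops.aten.set_.source_Tensor calls write the freshly computed
# power-iteration vectors back into primals_2 (u, shape (4,)) and primals_3
# (v, shape (64,)), mirroring the in-place `u.data = ...` / `v.data = ...`
# updates in SpectralNorm._update_u_v, so repeated invocations continue the
# power iteration instead of restarting it. A quick way to observe the
# mutation (hypothetical helper; assumes a CUDA device):
def _show_u_writeback():
    from torch._dynamo.testing import rand_strided
    ps = [rand_strided(s, st, device='cuda:0', dtype=torch.float32)
          for s, st in [((256, 4, 4, 4), (64, 16, 4, 1)),
                        ((4,), (1,)), ((64,), (1,)),
                        ((4, 4, 4, 4), (64, 16, 4, 1)),
                        ((4,), (1,)), ((32, 4, 4, 4), (64, 16, 4, 1))]]
    u_before = ps[1].clone()
    call(list(ps))  # pass a copy: call() clears the list it receives
    return torch.equal(u_before, ps[1])  # expected False once u is updated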
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Parameter def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, eps=1e-08): super(AdaptiveInstanceNorm2d, self).__init__() self.eps = eps def IN_noWeight(self, x): N, C = x.size(0), x.size(1) mean = x.contiguous().view(N, C, -1).mean(2).contiguous().view(N, C, 1, 1) x = x - mean var = torch.mul(x, x) var = var.contiguous().view(N, C, -1).mean(2).contiguous().view(N, C, 1, 1) var = torch.rsqrt(var + self.eps) x = x * var return x def Apply_style(self, content, style): style = style.contiguous().view([-1, 2, content.size(1), 1, 1]) content = content * style[:, 0] + style[:, 1] return content def forward(self, content, style): normalized_content = self.IN_noWeight(content) stylized_content = self.Apply_style(normalized_content, style) return stylized_content class AdaINConv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', sn=True): super(AdaINConv2dLayer, self).__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) self.norm = AdaptiveInstanceNorm2d() if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigmoid() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, 
padding=0, dilation=dilation)) else: self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation) def forward(self, x, style): x = self.pad(x) x = self.conv2d(x) x = self.norm(x, style) if self.activation: x = self.activation(x) return x class TransposeAdaINConv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', sn=True, scale_factor=2): super(TransposeAdaINConv2dLayer, self).__init__() self.scale_factor = scale_factor self.conv2d = AdaINConv2dLayer(in_channels, out_channels, kernel_size, stride, padding, dilation, pad_type, activation, sn) def forward(self, x, style): x = F.interpolate(x, scale_factor=self.scale_factor, mode='nearest') x = self.conv2d(x, style) return x def get_inputs(): return [torch.rand([256, 4, 4, 4]), torch.rand([32, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
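The benchmark shapes make the `Apply_style` reshape concrete: `get_inputs` pairs a (256, 4, 4, 4) content batch with a (32, 4, 4, 4) style tensor, and the view `[-1, 2, C, 1, 1]` regroups the style's 32*4*4*4 = 2048 elements into (256, 2, 4, 1, 1), one per-channel scale (`style[:, 0]`) and shift (`style[:, 1]`) for each of the 256 content samples. A quick check of that bookkeeping (shapes only; the actual content tensor here is the normalized conv output of size (256, 4, 5, 5)):

import torch

content = torch.rand(256, 4, 5, 5)          # stands in for the normalized conv output
style = torch.rand(32, 4, 4, 4)
style = style.contiguous().view([-1, 2, content.size(1), 1, 1])
print(style.shape)                          # torch.Size([256, 2, 4, 1, 1])
out = content * style[:, 0] + style[:, 1]   # broadcasts over H, W
print(out.shape)                            # torch.Size([256, 4, 5, 5])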
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), None, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp9, None) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_mv_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.load(in_ptr0 + (64 + r0), None) tmp5 = tl.load(in_ptr1 + 1) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp9 = tl.load(in_ptr0 + (128 + r0), None) tmp10 = tl.load(in_ptr1 + 2) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp14 = tl.load(in_ptr0 + (192 + r0), None) tmp15 = tl.load(in_ptr1 + 3) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.sum(tmp20, 1)[:, None] tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-12 tmp25 = tmp23 + tmp24 tmp26 = tmp18 / tmp25 tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp18, None) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp25, None) tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp26, None) @triton.jit def triton_per_fused_div_mv_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4 = tmp1 / tmp3 tmp5 = tmp0 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_3(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, 
RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp6 = 1e-12 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp8, None) @triton.jit def triton_per_fused_dot_4(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) @triton.jit def triton_poi_fused_div_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 / tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_per_fused_add_convolution_leaky_relu_leaky_relu_backward_mean_mul_rsqrt_sub_6( in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 1024 rnumel = 25 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r2 = rindex x3 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + (r2 + 25 * x3), rmask & xmask, other=0.0) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp22 = tl.load(in_ptr1 + (4 + x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(rmask & xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = 25.0 tmp8 = tmp6 / tmp7 tmp9 = tmp2 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(rmask & xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tmp14 / tmp7 tmp16 = 1e-08 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tmp19 = tmp9 * tmp18 tmp21 = tmp19 * tmp20 tmp23 = tmp21 + tmp22 tmp24 = 0.0 tmp25 = tmp23 > tmp24 tmp26 = 0.2 tmp27 = tmp23 * tmp26 tmp28 = tl.where(tmp25, tmp23, tmp27) tmp29 = tmp28 > tmp24 tl.store(in_out_ptr0 + (r2 + 25 * x3), tmp2, rmask & xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp8, xmask) tl.debug_barrier() tl.store(in_out_ptr2 + x3, tmp18, xmask) tl.store(in_out_ptr3 + (r2 + 25 * x3), tmp28, rmask & xmask) tl.store(out_ptr0 + (r2 + 25 * x3), tmp29, rmask & xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (256, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (32, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = 
empty_strided_cuda((256, 4, 8, 8), (256, 64, 8, 1), torch. float32) get_raw_stream(0) triton_poi_fused__unsafe_index_0[grid(65536)](primals_1, buf0, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64,), (1,), torch.float32) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 buf23 = empty_strided_cuda((64,), (1,), torch.float32) triton_per_fused_add_div_linalg_vector_norm_mv_1[grid(1)](buf3, primals_4, primals_2, buf1, buf23, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf4 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_div_mv_2[grid(4)](primals_4, buf1, buf3, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf6 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_add_div_linalg_vector_norm_3[grid(1)](buf4, buf6, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf7 = empty_strided_cuda((), (), torch.float32) triton_per_fused_dot_4[grid(1)](buf6, buf4, buf7, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf4 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_div_5[grid(256)](primals_4, buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) buf9 = extern_kernels.convolution(buf0, buf8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (256, 4, 5, 5), (100, 25, 5, 1)) buf10 = buf9 del buf9 buf11 = empty_strided_cuda((256, 4), (4, 1), torch.float32) buf12 = buf11 del buf11 buf13 = empty_strided_cuda((256, 4), (4, 1), torch.float32) buf14 = reinterpret_tensor(buf13, (256, 4, 1, 1), (4, 1, 1, 1), 0) del buf13 buf15 = empty_strided_cuda((256, 4, 5, 5), (100, 25, 5, 1), torch. float32) buf16 = buf15 del buf15 buf17 = empty_strided_cuda((256, 4, 5, 5), (100, 25, 5, 1), torch.bool) triton_per_fused_add_convolution_leaky_relu_leaky_relu_backward_mean_mul_rsqrt_sub_6[ grid(1024)](buf10, buf12, buf14, buf16, primals_5, primals_6, buf17, 1024, 25, XBLOCK=1, num_warps=2, num_stages=1) del primals_5 buf18 = torch.ops.aten.set_.source_Tensor(primals_2, buf6) assert_size_stride(buf18, (4,), (1,)) del buf1 buf24 = torch.ops.aten.set_.source_Tensor(primals_3, buf23) assert_size_stride(buf24, (64,), (1,)) del primals_3 return (buf16, buf8, primals_2, primals_4, buf0, buf3, buf6, buf7, buf8, buf10, reinterpret_tensor(buf12, (256, 4, 1, 1), (4, 1, 1, 1), 0), buf14, reinterpret_tensor(primals_6, (256, 4, 1, 1), (8, 1, 1, 1), 0), buf17) def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = 
Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, eps=1e-08): super(AdaptiveInstanceNorm2d, self).__init__() self.eps = eps def IN_noWeight(self, x): N, C = x.size(0), x.size(1) mean = x.contiguous().view(N, C, -1).mean(2).contiguous().view(N, C, 1, 1) x = x - mean var = torch.mul(x, x) var = var.contiguous().view(N, C, -1).mean(2).contiguous().view(N, C, 1, 1) var = torch.rsqrt(var + self.eps) x = x * var return x def Apply_style(self, content, style): style = style.contiguous().view([-1, 2, content.size(1), 1, 1]) content = content * style[:, 0] + style[:, 1] return content def forward(self, content, style): normalized_content = self.IN_noWeight(content) stylized_content = self.Apply_style(normalized_content, style) return stylized_content class AdaINConv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', sn=True): super(AdaINConv2dLayer, self).__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) self.norm = AdaptiveInstanceNorm2d() if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigmoid() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation)) else: self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation) def forward(self, x, style): x = self.pad(x) x = self.conv2d(x) x = self.norm(x, style) if self.activation: x = self.activation(x) return x class TransposeAdaINConv2dLayerNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', sn=True, scale_factor=2): super(TransposeAdaINConv2dLayerNew, self).__init__() self.scale_factor = scale_factor self.conv2d = AdaINConv2dLayer(in_channels, out_channels, kernel_size, stride, padding, dilation, pad_type, activation, sn) def forward(self, input_0, input_1): primals_2 = self.conv2d.conv2d.module.bias primals_5 = self.conv2d.conv2d.module.weight_u primals_3 = self.conv2d.conv2d.module.weight_v primals_4 = self.conv2d.conv2d.module.weight_bar primals_1 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
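One way to sanity-check the generated module (a sketch, assuming both class definitions above are importable and a CUDA device is available, since the kernels are CUDA-only): load the eager layer's parameters into the inductor-backed one and compare a single forward pass. Both variants update the power-iteration vectors in place, so they must start from identical state:

import torch

eager = TransposeAdaINConv2dLayer(4, 4, 4).cuda()
compiled = TransposeAdaINConv2dLayerNew(4, 4, 4).cuda()
compiled.load_state_dict(eager.state_dict())

x = torch.rand(256, 4, 4, 4, device='cuda')
style = torch.rand(32, 4, 4, 4, device='cuda')
ref = eager(x, style)
out = compiled(x, style)
print(torch.allclose(ref, out, atol=1e-5))  # expected: True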
LenKerr/Semantic-Colorization-GAN
TransposeAdaINConv2dLayer
false
5529
[ "MIT" ]
1
2ce52406ca6fc92e69692b451b1c9ae66ba3b76f
https://github.com/LenKerr/Semantic-Colorization-GAN/tree/2ce52406ca6fc92e69692b451b1c9ae66ba3b76f
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Parameter def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super().__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, eps=1e-08): super().__init__() self.eps = eps def IN_noWeight(self, x): N, C = x.size(0), x.size(1) mean = x.contiguous().view(N, C, -1).mean(2).contiguous().view(N, C, 1, 1) x = x - mean var = torch.mul(x, x) var = var.contiguous().view(N, C, -1).mean(2).contiguous().view(N, C, 1, 1) var = torch.rsqrt(var + self.eps) x = x * var return x def Apply_style(self, content, style): style = style.contiguous().view([-1, 2, content.size(1), 1, 1]) content = content * style[:, 0] + style[:, 1] return content def forward(self, content, style): normalized_content = self.IN_noWeight(content) stylized_content = self.Apply_style(normalized_content, style) return stylized_content class AdaINConv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', sn=True): super().__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) self.norm = AdaptiveInstanceNorm2d() if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigm # ... truncated (>4000 chars) for memory efficiency
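For reference, the sigma assembled by kernels 1-4 is the standard power-iteration estimate of the largest singular value of the weight viewed as a (height, -1) matrix: v <- normalize(W^T u), u <- normalize(W v), sigma = u . (W v). A small numerical check against torch.linalg.svdvals (illustrative only; the generated code runs a single iteration per forward):

import torch
import torch.nn.functional as F

w = torch.randn(4, 64)                      # weight_bar.view(height, -1)
u = F.normalize(torch.randn(4), dim=0)
for _ in range(50):                         # many iterations tighten the estimate
    v = F.normalize(w.t() @ u, dim=0)
    u = F.normalize(w @ v, dim=0)
sigma = torch.dot(u, w @ v)
print(sigma.item(), torch.linalg.svdvals(w)[0].item())  # nearly equal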
TransposeConv2dLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/oj/cojl5mb3pzv5jbmfzjkbac5hekbmpvb72kof6ouyyasitrogdd6n.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten._unsafe_index] # Source node to ATen node mapping: # x => _unsafe_index # Graph fragment: # %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {}) triton_poi_fused__unsafe_index_0 = async_compile.triton('triton_poi_fused__unsafe_index_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 8) % 8 x0 = xindex % 8 x2 = (xindex // 64) x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = 
tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + (4*tmp4) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ya/cya2grnbhraytq2wzrkx5sd2ottwnbrnd5ohd2xstcxyryneuc25.py # Topologically Sorted Source Nodes: [mv, norm, add, truediv], Original ATen: [aten.mv, aten.linalg_vector_norm, aten.add, aten.div] # Source node to ATen node mapping: # add => add_4 # mv => mul_4, sum_1 # norm => pow_1, pow_2, sum_2 # truediv => div # Graph fragment: # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute, %primals_2), kwargs = {}) # %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [1]), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, 1e-12), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %add_4), kwargs = {}) triton_per_fused_add_div_linalg_vector_norm_mv_1 = async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_mv_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_mv_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_mv_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) 
tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.load(in_ptr0 + (64 + r0), None) tmp5 = tl.load(in_ptr1 + (1)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp9 = tl.load(in_ptr0 + (128 + r0), None) tmp10 = tl.load(in_ptr1 + (2)) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp14 = tl.load(in_ptr0 + (192 + r0), None) tmp15 = tl.load(in_ptr1 + (3)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.sum(tmp20, 1)[:, None] tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-12 tmp25 = tmp23 + tmp24 tmp26 = tmp18 / tmp25 tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp18, None) tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp25, None) tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp26, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/qi/cqiozgecuvqtnurxrggbllqpuci3n65ycew5qi5gdqg44ypxzegy.py # Topologically Sorted Source Nodes: [truediv, mv_1], Original ATen: [aten.div, aten.mv] # Source node to ATen node mapping: # mv_1 => mul_5, sum_3 # truediv => div # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %add_4), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %div), kwargs = {}) # %sum_3 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_5, [1]), kwargs = {}) triton_per_fused_div_mv_2 = async_compile.triton('triton_per_fused_div_mv_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mv_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_mv_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, 
RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4 = tmp1 / tmp3 tmp5 = tmp0 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/qa/cqaed4ios3xqwlv4d3cciikkdz7d73vhwkegurd5cxca3y7htmvg.py # Topologically Sorted Source Nodes: [norm_1, add_1, truediv_1], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div] # Source node to ATen node mapping: # add_1 => add_5 # norm_1 => pow_3, pow_4, sum_4 # truediv_1 => div_1 # Graph fragment: # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 2), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, None), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_4, 0.5), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_4, 1e-12), kwargs = {}) # %div_1 : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, %add_5), kwargs = {}) triton_per_fused_add_div_linalg_vector_norm_3 = async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_3(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp6 = 1e-12 tmp7 = tmp5 + 
tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp8, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/c2/cc2arficwjs4sforhl25gdfmb3uzfg7hkw46gq3mxgv57jy52z32.py # Topologically Sorted Source Nodes: [sigma], Original ATen: [aten.dot] # Source node to ATen node mapping: # sigma => mul_7, sum_6 # Graph fragment: # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %sum_3), kwargs = {}) # %sum_6 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%mul_7,), kwargs = {}) triton_per_fused_dot_4 = async_compile.triton('triton_per_fused_dot_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_dot_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_dot_4(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/kw/ckwzptlssdpmtxi6pt23ik63xcuqar2giaakuqtgizxlg5weagc7.py # Topologically Sorted Source Nodes: [truediv_2], Original ATen: [aten.div] # Source node to ATen node mapping: # truediv_2 => div_2 # Graph fragment: # %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_4, %expand), kwargs = {}) triton_poi_fused_div_5 = async_compile.triton('triton_poi_fused_div_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 / tmp2 tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/n4/cn4gcl6zss6vqmwfppmtgzfmg5aubgn4jeyyouri7roaln7yxyx4.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # x_3 => gt, mul_8, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul_8), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where, 0), kwargs = {}) triton_poi_fused_leaky_relu_leaky_relu_backward_6 = async_compile.triton('triton_poi_fused_leaky_relu_leaky_relu_backward_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_leaky_relu_backward_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_6(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = tmp5 > tmp1 tl.store(in_out_ptr0 + (x0), tmp5, xmask) tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (64, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten._unsafe_index] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_index_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0) del primals_1 buf1 = empty_strided_cuda((64, ), (1, ), torch.float32) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse buf17 = empty_strided_cuda((64, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mv, norm, add, truediv], Original ATen: [aten.mv, aten.linalg_vector_norm, aten.add, aten.div] triton_per_fused_add_div_linalg_vector_norm_mv_1.run(buf3, primals_4, primals_2, buf1, buf17, 1, 64, grid=grid(1), stream=stream0) buf4 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [truediv, mv_1], Original ATen: [aten.div, aten.mv] triton_per_fused_div_mv_2.run(primals_4, buf1, buf3, buf4, 4, 64, grid=grid(4), stream=stream0) buf6 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [norm_1, add_1, truediv_1], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div] triton_per_fused_add_div_linalg_vector_norm_3.run(buf4, buf6, 1, 4, grid=grid(1), stream=stream0) buf7 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [sigma], Original ATen: [aten.dot] triton_per_fused_dot_4.run(buf6, buf4, buf7, 1, 4, grid=grid(1), stream=stream0) del buf4 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [truediv_2], Original ATen: [aten.div] triton_poi_fused_div_5.run(primals_4, buf7, buf8, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf9 = extern_kernels.convolution(buf0, buf8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 5, 5), (100, 25, 5, 1)) buf10 = buf9; del buf9 # reuse buf11 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_leaky_relu_leaky_relu_backward_6.run(buf10, buf11, 400, grid=grid(400), stream=stream0) # Topologically Sorted Source Nodes: [], Original ATen: [] buf12 = torch.ops.aten.set_.source_Tensor(primals_2, buf6) 
assert_size_stride(buf12, (4, ), (1, )) del buf1 # Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div] buf18 = torch.ops.aten.set_.source_Tensor(primals_3, buf17) assert_size_stride(buf18, (64, ), (1, )) del primals_3 return (buf10, buf8, primals_2, primals_4, buf0, buf3, buf6, buf7, buf8, buf11, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
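The `_unsafe_index` kernel inlines the nearest-neighbour upsampling: each output coordinate maps to source index int(dst * 0.5), i.e. floor division by the scale factor 2. The same indexing in plain PyTorch, which `F.interpolate(..., mode='nearest')` reproduces exactly:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
up = F.interpolate(x, scale_factor=2, mode='nearest')   # (4, 4, 8, 8)

idx = (torch.arange(8).float() * 0.5).long()            # 0,0,1,1,2,2,3,3
manual = x[:, :, idx][:, :, :, idx]
print(torch.equal(up, manual))                          # True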
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Parameter def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) self.beta = Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class Conv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', norm= 'none', sn=True): super(Conv2dLayer, self).__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) if norm == 'bn': self.norm = nn.BatchNorm2d(out_channels) elif norm == 'in': self.norm = nn.InstanceNorm2d(out_channels) elif norm == 'ln': self.norm = LayerNorm(out_channels) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigmoid() elif 
activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation, bias=False)) else: self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation, bias=False) def forward(self, x): x = self.pad(x) x = self.conv2d(x) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x class TransposeConv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', norm= 'none', sn=True, scale_factor=2): super(TransposeConv2dLayer, self).__init__() self.scale_factor = scale_factor self.conv2d = Conv2dLayer(in_channels, out_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) def forward(self, x): x = F.interpolate(x, scale_factor=self.scale_factor, mode='nearest') x = self.conv2d(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
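Shape bookkeeping for this configuration: the (4, 4, 4, 4) input is upsampled to 8x8, and the 4x4 kernel with stride 1 and zero padding then yields (8 - 4)/1 + 1 = 5 per spatial dim, matching the (4, 4, 5, 5) buffer asserted in `call`. The eager equivalent:

import torch

layer = TransposeConv2dLayer(in_channels=4, out_channels=4, kernel_size=4)
x = torch.rand(4, 4, 4, 4)
y = layer(x)
print(y.shape)   # torch.Size([4, 4, 5, 5])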
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_mv_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.load(in_ptr0 + (64 + r0), None) tmp5 = tl.load(in_ptr1 + 1) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp9 = tl.load(in_ptr0 + (128 + r0), None) tmp10 = tl.load(in_ptr1 + 2) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp14 = tl.load(in_ptr0 + (192 + r0), None) tmp15 = tl.load(in_ptr1 + 3) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.sum(tmp20, 1)[:, None] tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-12 tmp25 = tmp23 + tmp24 tmp26 = tmp18 / tmp25 tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp18, None) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp25, None) tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp26, None) @triton.jit def triton_per_fused_div_mv_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4 = tmp1 / tmp3 tmp5 = tmp0 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_3(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] 
tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp6 = 1e-12 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp8, None) @triton.jit def triton_per_fused_dot_4(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) @triton.jit def triton_poi_fused_div_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 / tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_6(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = tmp5 > tmp1 tl.store(in_out_ptr0 + x0, tmp5, xmask) tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_index_0[grid(1024)](primals_1, buf0, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64,), (1,), torch.float32) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 buf17 = empty_strided_cuda((64,), (1,), torch.float32) triton_per_fused_add_div_linalg_vector_norm_mv_1[grid(1)](buf3, primals_4, primals_2, buf1, buf17, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf4 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_div_mv_2[grid(4)](primals_4, buf1, buf3, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf6 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_add_div_linalg_vector_norm_3[grid(1)](buf4, buf6, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf7 = empty_strided_cuda((), (), torch.float32) triton_per_fused_dot_4[grid(1)](buf6, buf4, buf7, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf4 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_div_5[grid(256)](primals_4, buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) buf9 = extern_kernels.convolution(buf0, buf8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 5, 5), (100, 25, 5, 1)) buf10 = buf9 del buf9 buf11 = 
empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool) triton_poi_fused_leaky_relu_leaky_relu_backward_6[grid(400)](buf10, buf11, 400, XBLOCK=256, num_warps=4, num_stages=1) buf12 = torch.ops.aten.set_.source_Tensor(primals_2, buf6) assert_size_stride(buf12, (4,), (1,)) del buf1 buf18 = torch.ops.aten.set_.source_Tensor(primals_3, buf17) assert_size_stride(buf18, (64,), (1,)) del primals_3 return (buf10, buf8, primals_2, primals_4, buf0, buf3, buf6, buf7, buf8, buf11) def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) self.beta = Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class Conv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', norm= 'none', sn=True): super(Conv2dLayer, self).__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) if norm == 'bn': self.norm = nn.BatchNorm2d(out_channels) elif norm == 'in': self.norm = nn.InstanceNorm2d(out_channels) elif norm == 'ln': self.norm = LayerNorm(out_channels) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if 
activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigmoid() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation, bias=False)) else: self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation, bias=False) def forward(self, x): x = self.pad(x) x = self.conv2d(x) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x class TransposeConv2dLayerNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', norm= 'none', sn=True, scale_factor=2): super(TransposeConv2dLayerNew, self).__init__() self.scale_factor = scale_factor self.conv2d = Conv2dLayer(in_channels, out_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) def forward(self, input_0): primals_2 = self.conv2d.conv2d.module.weight_u primals_3 = self.conv2d.conv2d.module.weight_v primals_1 = self.conv2d.conv2d.module.weight_bar primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
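The SpectralNorm wrapper above estimates the weight's largest singular value with power iteration, and the compiled call() reproduces that estimate in the fused mv/norm kernels. A minimal eager sketch of the same estimate, assuming freshly initialised u and v (the real module keeps them as persistent buffers registered by _make_params):

import torch

def l2normalize(v, eps=1e-12):
    return v / (v.norm() + eps)

def estimate_sigma(w, power_iterations=1):
    # Flatten the conv weight to (out_channels, -1), as _update_u_v does.
    w_mat = w.reshape(w.shape[0], -1)
    u = l2normalize(torch.randn(w_mat.shape[0]))
    v = l2normalize(torch.randn(w_mat.shape[1]))
    for _ in range(power_iterations):
        v = l2normalize(w_mat.t() @ u)   # v <- W^T u / ||W^T u||
        u = l2normalize(w_mat @ v)       # u <- W v / ||W v||
    return torch.dot(u, w_mat @ v)       # sigma ~= u^T W v

The wrapped module then convolves with w / sigma, which is what the fused mv/norm kernels and triton_poi_fused_div_5 compute before the extern convolution runs.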
LenKerr/Semantic-Colorization-GAN
TransposeConv2dLayer
false
5530
[ "MIT" ]
1
2ce52406ca6fc92e69692b451b1c9ae66ba3b76f
https://github.com/LenKerr/Semantic-Colorization-GAN/tree/2ce52406ca6fc92e69692b451b1c9ae66ba3b76f
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Parameter def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super().__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) self.beta = Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super().__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class Conv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', norm= 'none', sn=True): super().__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) if norm == 'bn': self.norm = nn.BatchNorm2d(out_channels) elif norm == 'in': self.norm = nn.InstanceNorm2d(out_channels) elif norm == 'ln': self.norm = LayerNorm(out_channels) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation = # ... truncated (>4000 chars) for memory efficiency
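Before that spectral-normalised convolution runs, TransposeConv2dLayer upsamples its input; in the compiled call() this is the fused _unsafe_index kernel that expands the (4, 4, 4, 4) input into the (4, 4, 8, 8) buffer buf0. A hedged eager equivalent (nearest-neighbour interpolation at scale_factor=2, matching the module's default scale_factor; the tensor here is illustrative):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
# Nearest-neighbour upsample by 2, as TransposeConv2dLayer.forward does
# before handing the result to the spectral-normalised conv.
up = F.interpolate(x, scale_factor=2, mode='nearest')
assert up.shape == (4, 4, 8, 8)  # matches buf0 in the compiled call()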
BetaVAE_H
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/dq/cdq2ys2ixsgbmto6z3gv6fap6g6tzfirrsvo2mvozuimf2d5feuy.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 123008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 961) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/m2/cm2tzhnnikquck5qjqxil3z7xbvb4bmlv4dxtpfnobmoa2gn7kpo.py # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 25088 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 196) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/yl/cylhzvm5azwoxpfzq6cmn4wrmtveszsjfaehpp6d3sldzewuqoy4.py # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # x_2 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = 
call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 9216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 36) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/7x/c7x644ci2kcridm32huqyaq4krk2jeobv5q45sjg2zadi4i5xifb.py # Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_3 => convolution_3 # x_3 => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) # %le_4 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', 
index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x3), tmp4, xmask) tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/5q/c5qlaxbbxxxofs4euqefep4bsfkrxrk3yorlgw53njxbvityagxo.py # Topologically Sorted Source Nodes: [div, std, mul, z], Original ATen: [aten.div, aten.exp, aten.mul, aten.add] # Source node to ATen node mapping: # div => div # mul => mul # std => exp # z => add # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%slice_4, 2), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %normal_functional), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_2, %mul), kwargs = {}) triton_poi_fused_add_div_exp_mul_4 = async_compile.triton('triton_poi_fused_add_div_exp_mul_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_exp_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_exp_mul_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 40 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 10 x1 = (xindex // 10) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (20*x1)), xmask) tmp1 = tl.load(in_ptr0 + (10 + x0 + (20*x1)), xmask) tmp5 = tl.load(in_ptr1 + (x2), xmask) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tl_math.exp(tmp3) tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tl.store(out_ptr0 + (x2), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/du/cduf6krob444andnckdtxb622t46y6p2ense7shwixlcd3vtk2mf.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_5 => relu_4 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_13), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) # %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_5 = async_compile.triton('triton_poi_fused_relu_threshold_backward_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), 
tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/dk/cdkqzxyla22osetdivjgvawrv3ugrhoc4pphb4useu7rir2oxw7c.py # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_10 => convolution_7 # Graph fragment: # %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_7, %primals_20, %primals_21, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_6 = async_compile.triton('triton_poi_fused_convolution_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 49152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 3 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21 = args args.clear() assert_size_stride(primals_1, (32, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (32, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (64, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (64, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_9, (64, ), (1, )) assert_size_stride(primals_10, (20, 256), (256, 1)) assert_size_stride(primals_11, (20, ), (1, )) assert_size_stride(primals_12, (256, 10), (10, 1)) assert_size_stride(primals_13, (256, ), (1, )) assert_size_stride(primals_14, (64, 64, 4, 4), (1024, 16, 4, 1)) 
assert_size_stride(primals_15, (64, ), (1, )) assert_size_stride(primals_16, (64, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_17, (32, ), (1, )) assert_size_stride(primals_18, (32, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_19, (32, ), (1, )) assert_size_stride(primals_20, (32, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_21, (3, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 31, 31), (30752, 961, 31, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 123008, grid=grid(123008), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 14, 14), (6272, 196, 14, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 25088, grid=grid(25088), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 6, 6), (2304, 36, 6, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf5, primals_7, 9216, grid=grid(9216), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 2, 2), (256, 4, 2, 1)) buf7 = buf6; del buf6 # reuse buf24 = empty_strided_cuda((4, 64, 2, 2), (256, 4, 2, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_3.run(buf7, primals_9, buf24, 1024, grid=grid(1024), stream=stream0) del primals_9 buf8 = empty_strided_cuda((4, 20), (20, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, reinterpret_tensor(buf7, (4, 256), (256, 1), 0), reinterpret_tensor(primals_10, (256, 20), (1, 256), 0), alpha=1, beta=1, out=buf8) del primals_11 buf9 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [eps], Original ATen: [aten.normal_functional] buf10 = torch.ops.aten.normal_functional.default(buf9) buf11 = buf10 del buf10 buf12 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [div, std, mul, z], Original ATen: [aten.div, aten.exp, aten.mul, aten.add] triton_poi_fused_add_div_exp_mul_4.run(buf8, buf11, buf12, 40, grid=grid(40), stream=stream0) buf13 = 
empty_strided_cuda((4, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf12, reinterpret_tensor(primals_12, (10, 256), (1, 10), 0), out=buf13) buf14 = buf13; del buf13 # reuse buf23 = empty_strided_cuda((4, 256), (256, 1), torch.bool) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_5.run(buf14, primals_13, buf23, 1024, grid=grid(1024), stream=stream0) del primals_13 # Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution] buf15 = extern_kernels.convolution(reinterpret_tensor(buf14, (4, 64, 2, 2), (256, 4, 2, 1), 0), primals_14, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 64, 6, 6), (2304, 36, 6, 1)) buf16 = buf15; del buf15 # reuse # Topologically Sorted Source Nodes: [conv_transpose2d, x_7], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf16, primals_15, 9216, grid=grid(9216), stream=stream0) del primals_15 # Topologically Sorted Source Nodes: [conv_transpose2d_1], Original ATen: [aten.convolution] buf17 = extern_kernels.convolution(buf16, primals_16, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 32, 14, 14), (6272, 196, 14, 1)) buf18 = buf17; del buf17 # reuse # Topologically Sorted Source Nodes: [conv_transpose2d_1, x_8], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf18, primals_17, 25088, grid=grid(25088), stream=stream0) del primals_17 # Topologically Sorted Source Nodes: [conv_transpose2d_2], Original ATen: [aten.convolution] buf19 = extern_kernels.convolution(buf18, primals_18, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None) assert_size_stride(buf19, (4, 32, 31, 31), (30752, 961, 31, 1)) buf20 = buf19; del buf19 # reuse # Topologically Sorted Source Nodes: [conv_transpose2d_2, x_9], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_0.run(buf20, primals_19, 123008, grid=grid(123008), stream=stream0) del primals_19 # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution] buf21 = extern_kernels.convolution(buf20, primals_20, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf22 = buf21; del buf21 # reuse # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution] triton_poi_fused_convolution_6.run(buf22, primals_21, 49152, grid=grid(49152), stream=stream0) del primals_21 return (buf22, reinterpret_tensor(buf8, (4, 10), (20, 1), 0), reinterpret_tensor(buf8, (4, 10), (20, 1), 10), buf8, primals_1, primals_3, primals_4, primals_6, primals_8, primals_14, primals_16, primals_18, primals_20, buf1, buf3, buf5, reinterpret_tensor(buf7, (4, 256), (256, 1), 0), reinterpret_tensor(buf8, (4, 10), (20, 1), 10), buf11, buf12, reinterpret_tensor(buf14, (4, 64, 2, 2), (256, 4, 2, 1), 0), buf16, buf18, buf20, buf23, primals_12, primals_10, buf24, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = 
rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 32, 4, 4), (512, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 32, 4, 4), (512, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((64, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((20, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((256, 10), (10, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((64, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((64, 32, 4, 4), (512, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((32, 32, 4, 4), (512, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((32, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
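The kernel triton_poi_fused_add_div_exp_mul_4 above is the VAE reparametrisation step fused into four pointwise ops: it reads mu and logvar as the two halves of the (4, 20) encoder output buf8 and computes z = mu + exp(logvar / 2) * eps. A small eager check of that identity (random values, purely illustrative):

import torch

dist = torch.rand(4, 20)              # encoder output: [mu | logvar] per row
mu, logvar = dist[:, :10], dist[:, 10:]
eps = torch.randn(4, 10)              # matches aten.normal_functional on buf9
z = mu + (logvar * 0.5).exp() * eps   # kernel: tmp0 + exp(tmp1 * 0.5) * tmp5
assert z.shape == (4, 10)             # matches buf12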
import torch import torch.nn as nn def reparametrize(mu, logvar): std = logvar.div(2).exp() eps = std.data.new(std.size()).normal_() return mu + std * eps class Encoder_H(nn.Module): def __init__(self, input_shape=(64, 64), z_dim=10, nc=3, padding=1): super(Encoder_H, self).__init__() self.conv2d_1 = nn.Conv2d(nc, 32, 4, 2, padding) self.conv2d_2 = nn.Conv2d(32, 32, 4, 2, padding) self.conv2d_3 = nn.Conv2d(32, 64, 4, 2, padding) self.conv2d_4 = nn.Conv2d(64, 64, 4, 2, padding) self.flatten_shape, self.dconv_size = self._get_conv_output(input_shape , nc) self.linear = nn.Linear(self.flatten_shape, z_dim * 2) def _get_conv_output(self, shape, nc): bs = 1 dummy_x = torch.empty(bs, nc, *shape) x, dconv_size = self._forward_features(dummy_x) flatten_shape = x.flatten(1).size(1) return flatten_shape, dconv_size def _forward_features(self, x): size0 = x.shape[1:] x = torch.relu(self.conv2d_1(x)) size1 = x.shape[1:] x = torch.relu(self.conv2d_2(x)) size2 = x.shape[1:] x = torch.relu(self.conv2d_3(x)) size3 = x.shape[1:] x = torch.relu(self.conv2d_4(x)) size4 = x.shape[1:] return x, [size0, size1, size2, size3, size4] def forward(self, x): x = torch.relu(self.conv2d_1(x)) x = torch.relu(self.conv2d_2(x)) x = torch.relu(self.conv2d_3(x)) x = torch.relu(self.conv2d_4(x)) x = self.linear(x.flatten(1)) return x class Decoder_H(nn.Module): def __init__(self, output_shape, z_dim=10, nc=3, padding=1): super(Decoder_H, self).__init__() self.output_shape = output_shape flatten_shape = output_shape[-1][0] * output_shape[-1][1 ] * output_shape[-1][2] self.linear = nn.Linear(z_dim, flatten_shape) self.conv2d_1 = nn.ConvTranspose2d(64, 64, 4, 2, padding) self.conv2d_2 = nn.ConvTranspose2d(64, 32, 4, 2, padding) self.conv2d_3 = nn.ConvTranspose2d(32, 32, 4, 2, padding) self.conv2d_4 = nn.ConvTranspose2d(32, nc, 4, 2, padding) def _forward_features(self, x): x = torch.relu(self.conv2d_1(x, self.output_shape[3][1:])) x = torch.relu(self.conv2d_2(x, self.output_shape[2][1:])) x = torch.relu(self.conv2d_3(x, self.output_shape[1][1:])) x = self.conv2d_4(x, self.output_shape[0][1:]) return x def forward(self, x): x = torch.relu(self.linear(x)) x = x.view(-1, *self.output_shape[4]) x = self._forward_features(x) return x class BetaVAE_H(nn.Module): def __init__(self, input_shape=(64, 64), z_dim=10, nc=3, padding=0, activation=nn.Identity()): super(BetaVAE_H, self).__init__() self.z_dim = z_dim self.activation = activation self.encoder = Encoder_H(input_shape=input_shape, nc=nc, z_dim= z_dim, padding=padding) self.decoder = Decoder_H(self.encoder.dconv_size, nc=nc, z_dim= z_dim, padding=padding) def forward(self, x): distributions = self.encoder(x) mu = distributions[:, :self.z_dim] logvar = distributions[:, self.z_dim:] z = reparametrize(mu, logvar) x_recon = self.decoder(z) return self.activation(x_recon), mu, logvar def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
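A note on reparametrize above: std.data.new(std.size()).normal_() is legacy tensor-construction API. A modern, behaviourally equivalent sketch (torch.randn_like preserves device and dtype, which is an assumption about the intent rather than a change the repository made):

import torch

def reparametrize(mu, logvar):
    std = (logvar / 2).exp()
    eps = torch.randn_like(std)  # same N(0, 1) draw as .new(...).normal_()
    return mu + std * eps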
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 123008 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 961 % 32 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 25088 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 196 % 32 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 9216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 36 % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused_add_div_exp_mul_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 40 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 10 x1 = xindex // 10 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 20 * x1), xmask) tmp1 = tl.load(in_ptr0 + (10 + x0 + 20 * x1), xmask) tmp5 = tl.load(in_ptr1 + x2, xmask) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tl_math.exp(tmp3) tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tl.store(out_ptr0 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 3 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21) = args args.clear() assert_size_stride(primals_1, (32, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (32, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (64, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (20, 256), (256, 1)) assert_size_stride(primals_11, (20,), (1,)) assert_size_stride(primals_12, (256, 10), (10, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (64, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_15, (64,), (1,)) assert_size_stride(primals_16, (64, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_17, (32,), (1,)) assert_size_stride(primals_18, (32, 32, 4, 4), (512, 16, 4, 1)) assert_size_stride(primals_19, (32,), (1,)) assert_size_stride(primals_20, (32, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_21, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 31, 31), (30752, 961, 31, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(123008)](buf1, primals_2, 123008, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 14, 14), (6272, 196, 14, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(25088)](buf3, primals_5, 25088, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 6, 6), (2304, 36, 6, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(9216)](buf5, primals_7, 9216, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 2, 2), (256, 4, 2, 1)) buf7 = buf6 del buf6 buf24 = empty_strided_cuda((4, 64, 2, 2), (256, 4, 2, 1), torch.bool) 
triton_poi_fused_convolution_relu_threshold_backward_3[grid(1024)](buf7 , primals_9, buf24, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf8 = empty_strided_cuda((4, 20), (20, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf7, (4, 256), (256, 1), 0), reinterpret_tensor(primals_10, (256, 20), (1, 256 ), 0), alpha=1, beta=1, out=buf8) del primals_11 buf9 = empty_strided_cuda((4, 10), (10, 1), torch.float32) buf10 = torch.ops.aten.normal_functional.default(buf9) buf11 = buf10 del buf10 buf12 = buf9 del buf9 triton_poi_fused_add_div_exp_mul_4[grid(40)](buf8, buf11, buf12, 40, XBLOCK=64, num_warps=1, num_stages=1) buf13 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(buf12, reinterpret_tensor(primals_12, (10, 256), (1, 10), 0), out=buf13) buf14 = buf13 del buf13 buf23 = empty_strided_cuda((4, 256), (256, 1), torch.bool) triton_poi_fused_relu_threshold_backward_5[grid(1024)](buf14, primals_13, buf23, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_13 buf15 = extern_kernels.convolution(reinterpret_tensor(buf14, (4, 64, 2, 2), (256, 4, 2, 1), 0), primals_14, stride=(2, 2), padding=( 0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 64, 6, 6), (2304, 36, 6, 1)) buf16 = buf15 del buf15 triton_poi_fused_convolution_relu_2[grid(9216)](buf16, primals_15, 9216, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 buf17 = extern_kernels.convolution(buf16, primals_16, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 32, 14, 14), (6272, 196, 14, 1)) buf18 = buf17 del buf17 triton_poi_fused_convolution_relu_1[grid(25088)](buf18, primals_17, 25088, XBLOCK=256, num_warps=4, num_stages=1) del primals_17 buf19 = extern_kernels.convolution(buf18, primals_18, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None) assert_size_stride(buf19, (4, 32, 31, 31), (30752, 961, 31, 1)) buf20 = buf19 del buf19 triton_poi_fused_convolution_relu_0[grid(123008)](buf20, primals_19, 123008, XBLOCK=512, num_warps=8, num_stages=1) del primals_19 buf21 = extern_kernels.convolution(buf20, primals_20, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf22 = buf21 del buf21 triton_poi_fused_convolution_6[grid(49152)](buf22, primals_21, 49152, XBLOCK=256, num_warps=4, num_stages=1) del primals_21 return (buf22, reinterpret_tensor(buf8, (4, 10), (20, 1), 0), reinterpret_tensor(buf8, (4, 10), (20, 1), 10), buf8, primals_1, primals_3, primals_4, primals_6, primals_8, primals_14, primals_16, primals_18, primals_20, buf1, buf3, buf5, reinterpret_tensor(buf7, (4, 256), (256, 1), 0), reinterpret_tensor(buf8, (4, 10), (20, 1), 10), buf11, buf12, reinterpret_tensor(buf14, (4, 64, 2, 2), (256, 4, 2, 1), 0), buf16, buf18, buf20, buf23, primals_12, primals_10, buf24) def reparametrize(mu, logvar): std = logvar.div(2).exp() eps = std.data.new(std.size()).normal_() return mu + std * eps class Encoder_H(nn.Module): def __init__(self, input_shape=(64, 64), z_dim=10, nc=3, padding=1): super(Encoder_H, self).__init__() self.conv2d_1 = nn.Conv2d(nc, 32, 4, 2, padding) self.conv2d_2 = nn.Conv2d(32, 32, 4, 2, padding) self.conv2d_3 = nn.Conv2d(32, 64, 4, 2, padding) self.conv2d_4 = nn.Conv2d(64, 64, 4, 2, padding) self.flatten_shape, self.dconv_size = 
self._get_conv_output(input_shape , nc) self.linear = nn.Linear(self.flatten_shape, z_dim * 2) def _get_conv_output(self, shape, nc): bs = 1 dummy_x = torch.empty(bs, nc, *shape) x, dconv_size = self._forward_features(dummy_x) flatten_shape = x.flatten(1).size(1) return flatten_shape, dconv_size def _forward_features(self, x): size0 = x.shape[1:] x = torch.relu(self.conv2d_1(x)) size1 = x.shape[1:] x = torch.relu(self.conv2d_2(x)) size2 = x.shape[1:] x = torch.relu(self.conv2d_3(x)) size3 = x.shape[1:] x = torch.relu(self.conv2d_4(x)) size4 = x.shape[1:] return x, [size0, size1, size2, size3, size4] def forward(self, x): x = torch.relu(self.conv2d_1(x)) x = torch.relu(self.conv2d_2(x)) x = torch.relu(self.conv2d_3(x)) x = torch.relu(self.conv2d_4(x)) x = self.linear(x.flatten(1)) return x class Decoder_H(nn.Module): def __init__(self, output_shape, z_dim=10, nc=3, padding=1): super(Decoder_H, self).__init__() self.output_shape = output_shape flatten_shape = output_shape[-1][0] * output_shape[-1][1 ] * output_shape[-1][2] self.linear = nn.Linear(z_dim, flatten_shape) self.conv2d_1 = nn.ConvTranspose2d(64, 64, 4, 2, padding) self.conv2d_2 = nn.ConvTranspose2d(64, 32, 4, 2, padding) self.conv2d_3 = nn.ConvTranspose2d(32, 32, 4, 2, padding) self.conv2d_4 = nn.ConvTranspose2d(32, nc, 4, 2, padding) def _forward_features(self, x): x = torch.relu(self.conv2d_1(x, self.output_shape[3][1:])) x = torch.relu(self.conv2d_2(x, self.output_shape[2][1:])) x = torch.relu(self.conv2d_3(x, self.output_shape[1][1:])) x = self.conv2d_4(x, self.output_shape[0][1:]) return x def forward(self, x): x = torch.relu(self.linear(x)) x = x.view(-1, *self.output_shape[4]) x = self._forward_features(x) return x class BetaVAE_HNew(nn.Module): def __init__(self, input_shape=(64, 64), z_dim=10, nc=3, padding=0, activation=nn.Identity()): super(BetaVAE_HNew, self).__init__() self.z_dim = z_dim self.activation = activation self.encoder = Encoder_H(input_shape=input_shape, nc=nc, z_dim= z_dim, padding=padding) self.decoder = Decoder_H(self.encoder.dconv_size, nc=nc, z_dim= z_dim, padding=padding) def forward(self, input_0): primals_1 = self.encoder.conv2d_1.weight primals_2 = self.encoder.conv2d_1.bias primals_4 = self.encoder.conv2d_2.weight primals_5 = self.encoder.conv2d_2.bias primals_6 = self.encoder.conv2d_3.weight primals_7 = self.encoder.conv2d_3.bias primals_8 = self.encoder.conv2d_4.weight primals_9 = self.encoder.conv2d_4.bias primals_10 = self.encoder.linear.weight primals_11 = self.encoder.linear.bias primals_12 = self.decoder.linear.weight primals_13 = self.decoder.linear.bias primals_14 = self.decoder.conv2d_1.weight primals_15 = self.decoder.conv2d_1.bias primals_16 = self.decoder.conv2d_2.weight primals_17 = self.decoder.conv2d_2.bias primals_18 = self.decoder.conv2d_3.weight primals_19 = self.decoder.conv2d_3.bias primals_20 = self.decoder.conv2d_4.weight primals_21 = self.decoder.conv2d_4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21]) return output[0], output[1], output[2]
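Encoder_H._get_conv_output above infers the linear layer's input size by tracing a dummy batch through the conv stack instead of hard-coding it. The same trick in isolation (a minimal sketch with an arbitrary two-layer stack; the layer sizes here are illustrative, not the encoder's):

import torch
import torch.nn as nn

convs = nn.Sequential(nn.Conv2d(3, 32, 4, 2), nn.ReLU(),
                      nn.Conv2d(32, 64, 4, 2), nn.ReLU())
with torch.no_grad():
    dummy = torch.empty(1, 3, 64, 64)
    flatten_shape = convs(dummy).flatten(1).size(1)
linear = nn.Linear(flatten_shape, 20)  # adapts to any input resolution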
KinWaiCheuk/Beta-VAE
BetaVAE_H
false
5531
[ "MIT" ]
1
57f538320fed76b54e8489656b11dc83c06d1584
https://github.com/KinWaiCheuk/Beta-VAE/tree/57f538320fed76b54e8489656b11dc83c06d1584
import torch import torch.nn as nn def reparametrize(mu, logvar): std = logvar.div(2).exp() eps = std.data.new(std.size()).normal_() return mu + std * eps class Encoder_H(nn.Module): def __init__(self, input_shape=(64, 64), z_dim=10, nc=3, padding=1): super().__init__() self.conv2d_1 = nn.Conv2d(nc, 32, 4, 2, padding) self.conv2d_2 = nn.Conv2d(32, 32, 4, 2, padding) self.conv2d_3 = nn.Conv2d(32, 64, 4, 2, padding) self.conv2d_4 = nn.Conv2d(64, 64, 4, 2, padding) self.flatten_shape, self.dconv_size = self._get_conv_output(input_shape , nc) self.linear = nn.Linear(self.flatten_shape, z_dim * 2) def _get_conv_output(self, shape, nc): bs = 1 dummy_x = torch.empty(bs, nc, *shape) x, dconv_size = self._forward_features(dummy_x) flatten_shape = x.flatten(1).size(1) return flatten_shape, dconv_size def _forward_features(self, x): size0 = x.shape[1:] x = torch.relu(self.conv2d_1(x)) size1 = x.shape[1:] x = torch.relu(self.conv2d_2(x)) size2 = x.shape[1:] x = torch.relu(self.conv2d_3(x)) size3 = x.shape[1:] x = torch.relu(self.conv2d_4(x)) size4 = x.shape[1:] return x, [size0, size1, size2, size3, size4] def forward(self, x): x = torch.relu(self.conv2d_1(x)) x = torch.relu(self.conv2d_2(x)) x = torch.relu(self.conv2d_3(x)) x = torch.relu(self.conv2d_4(x)) x = self.linear(x.flatten(1)) return x class Decoder_H(nn.Module): def __init__(self, output_shape, z_dim=10, nc=3, padding=1): super().__init__() self.output_shape = output_shape flatten_shape = output_shape[-1][0] * output_shape[-1][1 ] * output_shape[-1][2] self.linear = nn.Linear(z_dim, flatten_shape) self.conv2d_1 = nn.ConvTranspose2d(64, 64, 4, 2, padding) self.conv2d_2 = nn.ConvTranspose2d(64, 32, 4, 2, padding) self.conv2d_3 = nn.ConvTranspose2d(32, 32, 4, 2, padding) self.conv2d_4 = nn.ConvTranspose2d(32, nc, 4, 2, padding) def _forward_features(self, x): x = torch.relu(self.conv2d_1(x, self.output_shape[3][1:])) x = torch.relu(self.conv2d_2(x, self.output_shape[2][1:])) x = torch.relu(self.conv2d_3(x, self.output_shape[1][1:])) x = self.conv2d_4(x, self.output_shape[0][1:]) return x def forward(self, x): x = torch.relu(self.linear(x)) x = x.view(-1, *self.output_shape[4]) x = self._forward_features(x) return x class Model(nn.Module): def __init__(self, input_shape=(64, 64), z_dim=10, nc=3, padding=0, activation=nn.Identity()): super().__init__() self.z_dim = z_dim self.activation = activation self.encoder = Encoder_H(input_shape=input_shape, nc=nc, z_dim= z_dim, padding=padding) self.decoder = Decoder_H(self.encoder.dconv_size, nc=nc, z_dim= z_dim, padding=padding) def forward(self, x): distributions = self.encoder(x) mu = distributions[:, :self.z_dim] logvar = distributions[:, self.z_dim:] z = reparametrize(mu, logvar) x_recon = self.decoder(z) return self.activation(x_recon), mu, logvar def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return []
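Decoder_H passes an explicit output size to each ConvTranspose2d call because a strided transposed convolution's output size is ambiguous; the compiled call() resolves the same ambiguity with output_padding=(1, 1) on the third deconv (out = (in - 1) * stride - 2 * padding + kernel_size + output_padding, so (14 - 1) * 2 - 0 + 4 + 1 = 31). A worked check of that 14 -> 31 step (layer arguments taken from the decoder; the input tensor is illustrative):

import torch
import torch.nn as nn

deconv = nn.ConvTranspose2d(32, 32, 4, 2)       # kernel 4, stride 2, padding 0
x = torch.rand(4, 32, 14, 14)
y = deconv(x, output_size=(31, 31))             # as Decoder_H._forward_features
assert y.shape[-2:] == (31, 31)                 # same as output_padding=(1, 1)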
MultiHeadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/u5/cu56dhpcth43gy4shrd7mcexf4nfa6qetnnhwe4mno4v6ug76h6j.py # Topologically Sorted Source Nodes: [att_out_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # att_out_1 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, 
primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (16, ), (1, )) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (16, ), (1, )) assert_size_stride(primals_8, (16, 4), (4, 1)) assert_size_stride(primals_9, (16, ), (1, )) assert_size_stride(primals_10, (4, 16), (16, 1)) assert_size_stride(primals_11, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_4 del primals_5 buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_6 del primals_7 buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_8 del primals_9 # Topologically Sorted Source Nodes: [], Original ATen: [] buf3 = torch.ops.aten._scaled_dot_product_efficient_attention.default(reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 4, 16, 1), 0), reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 4, 16, 1), 0), reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 4, 16, 1), 0), None, True, scale=0.5) buf4 = buf3[0] buf5 = buf3[1] buf6 = buf3[2] buf7 = buf3[3] del buf3 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [att_out_1], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf4, buf8, 256, grid=grid(256), stream=stream0) buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, reinterpret_tensor(buf8, (16, 16), (16, 1), 0), reinterpret_tensor(primals_10, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf9) del primals_11 return (reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 4, 16, 1), 0), reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 4, 16, 1), 0), reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 4, 16, 1), 0), buf4, buf5, buf6, buf7, reinterpret_tensor(buf8, (16, 16), (16, 1), 0), primals_10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = 
rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn as nn class SelfAttention(nn.Module): def __init__(self, dropout=0.1): super(SelfAttention, self).__init__() self.softmax = nn.Softmax(dim=-1) self._dropout = nn.Dropout(dropout) def forward(self, q, k, v, pad_mask=None): """ :param q: [bz, len_q, Q] :param k: [bz, len_k, K] :param v: [bz, len_v, V] :param pad_mask: [bz, len_q, len_k] mask for the padded positions more: Q==K, len_k==len_v :return: [bz, len_q, V] """ att_weights = torch.matmul(q, k.transpose(-1, -2)).div(math.sqrt(k. size(-1))) if pad_mask is not None: att_weights.masked_fill_(pad_mask, -1000000000.0) soft_att_weights = self.softmax(att_weights) if self.training: soft_att_weights = self._dropout(soft_att_weights) att_out = torch.matmul(soft_att_weights, v) return att_out class MultiHeadAttention(nn.Module): def __init__(self, d_model, d_k, d_v, nb_heads, dropout=0.1): super(MultiHeadAttention, self).__init__() self._d_model = d_model self._d_k = d_k self._d_v = d_v self._nb_heads = nb_heads self._linear_qs = nn.Linear(in_features=d_model, out_features=d_k * nb_heads) self._linear_ks = nn.Linear(in_features=d_model, out_features=d_k * nb_heads) self._linear_vs = nn.Linear(in_features=d_model, out_features=d_v * nb_heads) self._linear_out = nn.Linear(in_features=d_v * nb_heads, out_features=d_model) self._self_attention = SelfAttention(dropout) self._dropout = nn.Dropout(dropout) self.reset_parameters() def reset_parameters(self): nn.init.normal_(self._linear_qs.weight, mean=0, std=math.sqrt(1 / self._d_model)) nn.init.normal_(self._linear_ks.weight, mean=0, std=math.sqrt(1 / self._d_model)) nn.init.normal_(self._linear_vs.weight, mean=0, std=math.sqrt(1 / self._d_model)) nn.init.normal_(self._linear_out.weight, mean=0, std=math.sqrt(1 / self._d_model)) def forward(self, q, k, v, att_mask=None): """ :param q: [bz, len_q, d_model] :param k: [bz, len_k, d_model] :param v: [bz, len_v, d_model] :param att_mask: [bz, len_k] more: Q == K, len_k==len_v :return: [bz, len_q, d_model] """ bz, len_q, _ = q.size() bz, len_k, _ = k.size() bz, len_v, _ = v.size() q_fc = self._linear_qs(q).reshape(bz, len_q, self._nb_heads, -1 ).transpose(1, 2) k_fc = self._linear_ks(k).reshape(bz, len_k, self._nb_heads, -1 ).transpose(1, 2) v_fc = self._linear_vs(v).reshape(bz, len_v, self._nb_heads, -1 ).transpose(1, 2) if att_mask is not None: att_mask = att_mask[:, None, None, :] att_out = self._self_attention(q_fc, k_fc, v_fc, att_mask) att_out = att_out.transpose(1, 2).reshape(bz, len_q, -1) multi_head = self._linear_out(att_out) if self.training: multi_head = self._dropout(multi_head) return multi_head def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'d_model': 4, 'd_k': 4, 'd_v': 4, 'nb_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (16,), (1,)) assert_size_stride(primals_8, (16, 4), (4, 1)) assert_size_stride(primals_9, (16,), (1,)) assert_size_stride(primals_10, (4, 16), (16, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_4 del primals_5 buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_6 del primals_7 buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_8 del primals_9 buf3 = torch.ops.aten._scaled_dot_product_efficient_attention.default( reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 4, 16, 1), 0), reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 4, 16, 1), 0), reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 4, 16, 1), 0), None, True, scale=0.5) buf4 = buf3[0] buf5 = buf3[1] buf6 = buf3[2] buf7 = buf3[3] del buf3 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](buf4, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf8, (16, 16), (16, 1), 0), reinterpret_tensor(primals_10, (16, 4), (1, 16), 0 ), alpha=1, beta=1, out=buf9) del primals_11 return reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 4, 16, 1), 0 ), reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 4, 16, 1), 0 ), reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 4, 16, 1), 0 ), buf4, buf5, buf6, buf7, reinterpret_tensor(buf8, (16, 16), (16, 1), 0), primals_10 class SelfAttention(nn.Module): def __init__(self, dropout=0.1): 
super(SelfAttention, self).__init__() self.softmax = nn.Softmax(dim=-1) self._dropout = nn.Dropout(dropout) def forward(self, q, k, v, pad_mask=None): """ :param q: [bz, len_q, Q] :param k: [bz, len_k, K] :param v: [bz, len_v, V] :param pad_mask: [bz, len_q, len_k] mask for the padded positions more: Q==K, len_k==len_v :return: [bz, len_q, V] """ att_weights = torch.matmul(q, k.transpose(-1, -2)).div(math.sqrt(k. size(-1))) if pad_mask is not None: att_weights.masked_fill_(pad_mask, -1000000000.0) soft_att_weights = self.softmax(att_weights) if self.training: soft_att_weights = self._dropout(soft_att_weights) att_out = torch.matmul(soft_att_weights, v) return att_out class MultiHeadAttentionNew(nn.Module): def __init__(self, d_model, d_k, d_v, nb_heads, dropout=0.1): super(MultiHeadAttentionNew, self).__init__() self._d_model = d_model self._d_k = d_k self._d_v = d_v self._nb_heads = nb_heads self._linear_qs = nn.Linear(in_features=d_model, out_features=d_k * nb_heads) self._linear_ks = nn.Linear(in_features=d_model, out_features=d_k * nb_heads) self._linear_vs = nn.Linear(in_features=d_model, out_features=d_v * nb_heads) self._linear_out = nn.Linear(in_features=d_v * nb_heads, out_features=d_model) self._self_attention = SelfAttention(dropout) self._dropout = nn.Dropout(dropout) self.reset_parameters() def reset_parameters(self): nn.init.normal_(self._linear_qs.weight, mean=0, std=math.sqrt(1 / self._d_model)) nn.init.normal_(self._linear_ks.weight, mean=0, std=math.sqrt(1 / self._d_model)) nn.init.normal_(self._linear_vs.weight, mean=0, std=math.sqrt(1 / self._d_model)) nn.init.normal_(self._linear_out.weight, mean=0, std=math.sqrt(1 / self._d_model)) def forward(self, input_0, input_1, input_2): primals_4 = self._linear_qs.weight primals_5 = self._linear_qs.bias primals_6 = self._linear_ks.weight primals_7 = self._linear_ks.bias primals_8 = self._linear_vs.weight primals_9 = self._linear_vs.bias primals_10 = self._linear_out.weight primals_11 = self._linear_out.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
LindgeW/DomainAdaption4DependencyParsing
MultiHeadAttention
false
5532
[ "Apache-2.0" ]
1
5de136a37d8fe730e4235ed95bf923763fe21ea6
https://github.com/LindgeW/DomainAdaption4DependencyParsing/tree/5de136a37d8fe730e4235ed95bf923763fe21ea6
import math import torch import torch.nn as nn class SelfAttention(nn.Module): def __init__(self, dropout=0.1): super().__init__() self.softmax = nn.Softmax(dim=-1) self._dropout = nn.Dropout(dropout) def forward(self, q, k, v, pad_mask=None): """ :param q: [bz, len_q, Q] :param k: [bz, len_k, K] :param v: [bz, len_v, V] :param pad_mask: [bz, len_q, len_k] mask for the padded positions more: Q==K, len_k==len_v :return: [bz, len_q, V] """ att_weights = torch.matmul(q, k.transpose(-1, -2)).div(math.sqrt(k. size(-1))) if pad_mask is not None: att_weights.masked_fill_(pad_mask, -1000000000.0) soft_att_weights = self.softmax(att_weights) if self.training: soft_att_weights = self._dropout(soft_att_weights) att_out = torch.matmul(soft_att_weights, v) return att_out class Model(nn.Module): def __init__(self, d_model, d_k, d_v, nb_heads, dropout=0.1): super().__init__() self._d_model = d_model self._d_k = d_k self._d_v = d_v self._nb_heads = nb_heads self._linear_qs = nn.Linear(in_features=d_model, out_features=d_k * nb_heads) self._linear_ks = nn.Linear(in_features=d_model, out_features=d_k * nb_heads) self._linear_vs = nn.Linear(in_features=d_model, out_features=d_v * nb_heads) self._linear_out = nn.Linear(in_features=d_v * nb_heads, out_features=d_model) self._self_attention = SelfAttention(dropout) self._dropout = nn.Dropout(dropout) self.reset_parameters() def reset_parameters(self): nn.init.normal_(self._linear_qs.weight, mean=0, std=math.sqrt(1 / self._d_model)) nn.init.normal_(self._linear_ks.weight, mean=0, std=math.sqrt(1 / self._d_model)) nn.init.normal_(self._linear_vs.weight, mean=0, std=math.sqrt(1 / self._d_model)) nn.init.normal_(self._linear_out.weight, mean=0, std=math.sqrt(1 / self._d_model)) def forward(self, q, k, v, att_mask=None): """ :param q: [bz, len_q, d_model] :param k: [bz, len_k, d_model] :param v: [bz, len_v, d_model] :param att_mask: [bz, len_k] more: Q == K, len_k==len_v :return: [bz, len_q, d_model] """ bz, len_q, _ = q.size() bz, len_k, _ = k.size() bz, len_v, _ = v.size() q_fc = self._linear_qs(q).reshape(bz, len_q, self._nb_heads, -1 ).transpose(1, 2) k_fc = self._linear_ks(k).reshape(bz, len_k, self._nb_heads, -1 ).transpose(1, 2) v_fc = self._linear_vs(v).reshape(bz, len_v, self._nb_heads, -1 ).transpose(1, 2) if att_mask is not None: att_mask = att_mask[:, None, None, :] att_out = self._self_attention(q_fc, k_fc, v_fc, att_mask) att_out = att_out.transpose(1, 2).reshape(bz, len_q, -1) multi_head = self._linear_out(att_out) if self.training: multi_head = self._dropout(multi_head) return multi_head def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [4, 4, 4, 4]
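A minimal parity sketch for this record, not part of the source repo: it assumes the eager MultiHeadAttention/SelfAttention classes and the generated MultiHeadAttentionNew with its call() are loaded into one namespace, and that a CUDA device is available. With a shared state dict and eval mode (dropout disabled), the eager softmax-attention path and the fused _scaled_dot_product_efficient_attention path should agree to floating-point tolerance, since both apply the same 1/sqrt(d_k) = 0.5 scale for d_k = 4.

import torch

eager = MultiHeadAttention(d_model=4, d_k=4, d_v=4, nb_heads=4).cuda().eval()
fused = MultiHeadAttentionNew(d_model=4, d_k=4, d_v=4, nb_heads=4).cuda().eval()
fused.load_state_dict(eager.state_dict())  # parameter names are identical in both classes
q, k, v = (torch.rand(4, 4, 4, device='cuda') for _ in range(3))
with torch.no_grad():
    assert torch.allclose(eager(q, k, v), fused(q, k, v), atol=1e-5)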
AttnMerge
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/qz/cqza6p5fjiie2hfiu5dfjqqugrnzziwuwxzlhzy2aa7khopxjbym.py # Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax] # Source node to ATen node mapping: # alpha => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = 
xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ry/cry3o7ciiisvaqxldoaatx6nejwjg2z36x5a6gra4n32ey4vmhsr.py # Topologically Sorted Source Nodes: [alpha, mul, out], Original ATen: [aten._softmax, aten.mul, aten.sum] # Source node to ATen node mapping: # alpha => div, sum_1 # mul => mul # out => sum_2 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused__softmax_mul_sum_1 = async_compile.triton('triton_poi_fused__softmax_mul_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp8 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask) tmp11 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask) tmp15 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), 
xmask) tmp19 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tmp0 / tmp6 tmp9 = tmp7 * tmp8 tmp10 = tmp1 / tmp6 tmp12 = tmp10 * tmp11 tmp13 = tmp9 + tmp12 tmp14 = tmp3 / tmp6 tmp16 = tmp14 * tmp15 tmp17 = tmp13 + tmp16 tmp18 = tmp5 / tmp6 tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tl.store(out_ptr0 + (x2), tmp21, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [hx], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [alpha, mul, out], Original ATen: [aten._softmax, aten.mul, aten.sum] triton_poi_fused__softmax_mul_sum_1.run(buf1, primals_2, buf2, 64, grid=grid(64), stream=stream0) del buf1 return (buf2, primals_2, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class AttnMerge(nn.Module): def __init__(self, hn_size): super(AttnMerge, self).__init__() self.fc = nn.Linear(hn_size, hn_size, bias=False) def forward(self, x): hx = self.fc(x) alpha = F.softmax(hx, dim=1) out = torch.sum(alpha * x, dim=1) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hn_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp8 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp11 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp15 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp19 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tmp0 / tmp6 tmp9 = tmp7 * tmp8 tmp10 = tmp1 / tmp6 tmp12 = tmp10 * tmp11 tmp13 = tmp9 + tmp12 tmp14 = tmp3 / tmp6 tmp16 = tmp14 * tmp15 tmp17 = tmp13 + tmp16 tmp18 = tmp5 / tmp6 tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tl.store(out_ptr0 + x2, tmp21, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_mul_sum_1[grid(64)](buf1, primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf1 return buf2, primals_2, buf0 class AttnMergeNew(nn.Module): def __init__(self, hn_size): super(AttnMergeNew, self).__init__() self.fc = nn.Linear(hn_size, hn_size, bias=False) def forward(self, input_0): primals_1 = self.fc.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
LindgeW/DomainAdaption4DependencyParsing
AttnMerge
false
5533
[ "Apache-2.0" ]
1
5de136a37d8fe730e4235ed95bf923763fe21ea6
https://github.com/LindgeW/DomainAdaption4DependencyParsing/tree/5de136a37d8fe730e4235ed95bf923763fe21ea6
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, hn_size): super().__init__() self.fc = nn.Linear(hn_size, hn_size, bias=False) def forward(self, x): hx = self.fc(x) alpha = F.softmax(hx, dim=1) out = torch.sum(alpha * x, dim=1) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
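A hypothetical check of the same shape, assuming both AttnMerge and AttnMergeNew plus call() share one namespace on a CUDA machine: the first Triton kernel materializes the shifted exponentials exp(hx - max) of the dim=1 softmax, and the second fuses the normalization, the alpha * x product, and the dim=1 reduction into a single pass, so the fused module should reproduce F.softmax(hx, 1).mul(x).sum(1) up to rounding.

import torch

eager = AttnMerge(hn_size=4).cuda()
fused = AttnMergeNew(hn_size=4).cuda()
fused.load_state_dict(eager.state_dict())  # a single bias-free fc weight
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    assert eager(x).shape == (4, 4, 4)  # dim=1 is reduced away
    assert torch.allclose(eager(x), fused(x), atol=1e-6)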
Highway
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/oy/coygqjpoh5r5lolh6b5eagjttwpgk4w2ou6uwxyoprhw5g2ddsgw.py # Topologically Sorted Source Nodes: [gate, mul, sub, mul_1, x], Original ATen: [aten.sigmoid, aten.mul, aten.rsub, aten.add] # Source node to ATen node mapping: # gate => sigmoid # mul => mul # mul_1 => mul_1 # sub => sub # x => add # Graph fragment: # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_4), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_3), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) triton_poi_fused_add_mul_rsub_sigmoid_0 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp6 = tl.load(in_ptr2 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = 1.0 tmp5 = tmp4 - tmp1 tmp7 = tmp5 * tmp6 tmp8 = tmp3 + tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [gate, mul, sub, mul_1, x], Original ATen: [aten.sigmoid, aten.mul, aten.rsub, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_0.run(buf0, primals_4, primals_3, buf1, 256, grid=grid(256), stream=stream0) return (buf1, primals_3, primals_4, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Highway(nn.Module): def __init__(self, x_hidden): super(Highway, self).__init__() self.lin = nn.Linear(x_hidden, x_hidden) def forward(self, x1, x2): gate = torch.sigmoid(self.lin(x1)) x = torch.mul(gate, x2) + torch.mul(1 - gate, x1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'x_hidden': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp6 = tl.load(in_ptr2 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = 1.0 tmp5 = tmp4 - tmp1 tmp7 = tmp5 * tmp6 tmp8 = tmp3 + tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_0[grid(256)](buf0, primals_4, primals_3, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf1, primals_3, primals_4, buf0 class HighwayNew(nn.Module): def __init__(self, x_hidden): super(HighwayNew, self).__init__() self.lin = nn.Linear(x_hidden, x_hidden) def forward(self, input_0, input_1): primals_1 = self.lin.weight primals_2 = self.lin.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
LinXueyuanStdio/EchoEA
Highway
false
5534
[ "Apache-2.0" ]
1
d9b8564023cca71678dec44cf8cab3f91736448a
https://github.com/LinXueyuanStdio/EchoEA/tree/d9b8564023cca71678dec44cf8cab3f91736448a
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, x_hidden): super().__init__() self.lin = nn.Linear(x_hidden, x_hidden) def forward(self, x1, x2): gate = torch.sigmoid(self.lin(x1)) x = torch.mul(gate, x2) + torch.mul(1 - gate, x1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
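The fused kernel in this record is a single elementwise pass over the gate: after one addmm computes lin(x1), it evaluates sigmoid(g) * x2 + (1 - sigmoid(g)) * x1 per element. A small sketch, assuming CUDA and both class definitions in scope, exercising the generated module against the eager one:

import torch

eager = Highway(x_hidden=4).cuda()
fused = HighwayNew(x_hidden=4).cuda()
fused.load_state_dict(eager.state_dict())  # shared lin.weight and lin.bias
x1 = torch.rand(4, 4, 4, 4, device='cuda')
x2 = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(x1, x2), fused(x1, x2), atol=1e-6)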
ChannelPool
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/uc/cucdaa5tqnxykdmw5yqh7ir5ac35phopjcobljrg4rrtlnfjtuwd.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 2 x0 = xindex % 16 x2 = (xindex // 32) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp10 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = triton_helpers.maximum(tmp9, tmp10) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tmp15 = tl.full([1], 2, tl.int64) tmp16 = tmp0 < tmp15 tmp17 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tmp17 + tmp18 tmp20 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tmp19 + tmp20 tmp22 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = tmp21 + tmp22 tmp24 = 4.0 tmp25 = tmp23 / tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp14, tmp25, tmp26) tmp28 = tl.where(tmp4, tmp13, tmp27) tl.store(out_ptr0 + (x3), tmp28, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(arg0_1, buf0, 128, grid=grid(128), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class ChannelPool(nn.Module): def forward(self, x): return torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1) .unsqueeze(1)), dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 2 x0 = xindex % 16 x2 = xindex // 32 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp10 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = triton_helpers.maximum(tmp9, tmp10) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tl.full([1], 2, tl.int64) tmp17 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tmp17 + tmp18 tmp20 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tmp19 + tmp20 tmp22 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = tmp21 + tmp22 tmp24 = 4.0 tmp25 = tmp23 / tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp14, tmp25, tmp26) tmp28 = tl.where(tmp4, tmp13, tmp27) tl.store(out_ptr0 + x3, tmp28, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class ChannelPoolNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Linus4world/mrs-gan
ChannelPool
false
5535
[ "BSD-2-Clause" ]
1
64669251584a7421cce3a5173983a2275dcb438a
https://github.com/Linus4world/mrs-gan/tree/64669251584a7421cce3a5173983a2275dcb438a
import torch import torch.nn as nn class Model(nn.Module): def forward(self, x): return torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1) .unsqueeze(1)), dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
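Because ChannelPool carries no parameters, the generated call() wrapper can be exercised directly as well; a sketch under the assumption of an available CUDA device, comparing the single fused cat/max/mean kernel against the eager expression:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
expected = torch.cat((x.max(1)[0].unsqueeze(1), x.mean(1).unsqueeze(1)), dim=1)
out, = call([x])  # call() clears the list it is handed; x itself is untouched
assert out.shape == (4, 2, 4, 4)  # channel dim pooled down to [max, mean]
assert torch.allclose(out, expected, atol=1e-6)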
ResidualDenseBlock_3C
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/td/ctdv3m5a33kovvtng5iilth4k6mtnyfcota6hhwoiqm34iumu7wi.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # x => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [1, 1, 1, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x2 = (xindex // 36) x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 
4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/er/cerxfmiafmpnsn44ihnsss2pvd76g6x2i62qavnas5ygqo5mj43d.py # Topologically Sorted Source Nodes: [cat, x_3], Original ATen: [aten.cat, aten.constant_pad_nd] # Source node to ATen node mapping: # cat => cat # x_3 => constant_pad_nd_1 # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %where], 1), kwargs = {}) # %constant_pad_nd_1 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%cat, [1, 1, 1, 1], 0.0), kwargs = {}) triton_poi_fused_cat_constant_pad_nd_1 = async_compile.triton('triton_poi_fused_cat_constant_pad_nd_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_constant_pad_nd_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_constant_pad_nd_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x2 = (xindex // 36) % 8 x3 = (xindex // 288) x6 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = x2 tmp12 = tmp11 >= tmp1 tmp13 = tmp11 < tmp3 tmp14 = tmp13 & tmp10 tmp15 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2) + (64*x3)), tmp14 & xmask, other=0.0) tmp16 = tmp11 >= tmp3 tmp17 = tl.full([1], 8, tl.int64) tmp18 = tmp11 < tmp17 tmp19 = tmp16 & tmp10 tmp20 = tl.load(in_ptr1 + ((-5) + x0 + (4*x1) + (16*((-4) + x2)) + (64*x3)), tmp19 & xmask, other=0.0) tmp21 = 0.0 tmp22 = tmp20 > tmp21 tmp23 = 0.2 tmp24 = tmp20 * tmp23 tmp25 = tl.where(tmp22, tmp20, tmp24) tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp19, tmp25, tmp26) tmp28 = tl.where(tmp13, tmp15, tmp27) tmp29 = 
tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp10, tmp28, tmp29) tl.store(out_ptr0 + (x6), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/s3/cs34ah7ioinuak53u3oyhgsrsqeqfnovsb4djwnzfgkw44v6u547.py # Topologically Sorted Source Nodes: [cat_1, x_6], Original ATen: [aten.cat, aten.constant_pad_nd] # Source node to ATen node mapping: # cat_1 => cat_1 # x_6 => constant_pad_nd_2 # Graph fragment: # %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %where, %where_1], 1), kwargs = {}) # %constant_pad_nd_2 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%cat_1, [1, 1, 1, 1], 0.0), kwargs = {}) triton_poi_fused_cat_constant_pad_nd_2 = async_compile.triton('triton_poi_fused_cat_constant_pad_nd_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_constant_pad_nd_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_constant_pad_nd_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1728 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x2 = (xindex // 36) % 12 x3 = (xindex // 432) x6 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = x2 tmp12 = tmp11 >= tmp1 tmp13 = tmp11 < tmp3 tmp14 = tmp13 & tmp10 tmp15 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2) + (64*x3)), tmp14 & xmask, other=0.0) tmp16 = tmp11 >= tmp3 tmp17 = tl.full([1], 8, tl.int64) tmp18 = tmp11 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tmp19 & tmp10 tmp21 = tl.load(in_ptr1 + ((-5) + x0 + (4*x1) + (16*((-4) + x2)) + (64*x3)), tmp20 & xmask, other=0.0) tmp22 = 0.0 tmp23 = tmp21 > tmp22 tmp24 = 0.2 tmp25 = tmp21 * tmp24 tmp26 = tl.where(tmp23, tmp21, tmp25) tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp20, tmp26, tmp27) tmp29 = tmp11 >= tmp17 tmp30 = tl.full([1], 12, tl.int64) tmp31 = tmp11 < tmp30 tmp32 = tmp29 & tmp10 tmp33 = tl.load(in_ptr2 + ((-5) + x0 
+ (4*x1) + (16*((-8) + x2)) + (64*x3)), tmp32 & xmask, other=0.0) tmp34 = tmp33 > tmp22 tmp35 = tmp33 * tmp24 tmp36 = tl.where(tmp34, tmp33, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp32, tmp36, tmp37) tmp39 = tl.where(tmp19, tmp28, tmp38) tmp40 = tl.where(tmp13, tmp15, tmp39) tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype) tmp42 = tl.where(tmp10, tmp40, tmp41) tl.store(out_ptr0 + (x6), tmp42, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ya/cyaqnnpliwjolqezg24obzz6ccba4jeqnoqwry7pv7itm4cqy6ao.py # Topologically Sorted Source Nodes: [x_8, mul, x3], Original ATen: [aten.leaky_relu, aten.mul, aten.add, aten.leaky_relu_backward] # Source node to ATen node mapping: # mul => mul_3 # x3 => add # x_8 => gt_2, mul_2, where_2 # Graph fragment: # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.2), kwargs = {}) # %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_2, 0.1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_1), kwargs = {}) # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_2, 0), kwargs = {}) triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_3 = async_compile.triton('triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp8 = tl.load(in_ptr1 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = 0.1 tmp7 = tmp5 * 
tmp6 tmp9 = tmp7 + tmp8 tmp10 = tmp5 > tmp1 tl.store(out_ptr0 + (x0), tmp9, xmask) tl.store(out_ptr1 + (x0), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/my/cmymu55pgdw2f6aywkz53vy2ux4ld5ava4e5srxurzv3xcxqknfg.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # x_5 => gt_1, mul_1, where_1 # Graph fragment: # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.2), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {}) # %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_1, 0), kwargs = {}) triton_poi_fused_leaky_relu_leaky_relu_backward_4 = async_compile.triton('triton_poi_fused_leaky_relu_leaky_relu_backward_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_leaky_relu_backward_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = tmp5 > tmp1 tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_4, (4, 12, 3, 3), (108, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, 
buf0, 576, grid=grid(576), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 8, 6, 6), (288, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [cat, x_3], Original ATen: [aten.cat, aten.constant_pad_nd] triton_poi_fused_cat_constant_pad_nd_1.run(primals_1, buf1, buf2, 1152, grid=grid(1152), stream=stream0) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 12, 6, 6), (432, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_1, x_6], Original ATen: [aten.cat, aten.constant_pad_nd] triton_poi_fused_cat_constant_pad_nd_2.run(primals_1, buf1, buf3, buf4, 1728, grid=grid(1728), stream=stream0) # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1)) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_8, mul, x3], Original ATen: [aten.leaky_relu, aten.mul, aten.add, aten.leaky_relu_backward] triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_3.run(buf5, primals_1, buf6, buf7, 256, grid=grid(256), stream=stream0) del buf5 del primals_1 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_leaky_relu_leaky_relu_backward_4.run(buf3, buf8, 256, grid=grid(256), stream=stream0) del buf3 buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_leaky_relu_leaky_relu_backward_4.run(buf1, buf9, 256, grid=grid(256), stream=stream0) del buf1 return (buf6, primals_2, primals_3, primals_4, buf0, buf2, buf4, buf7, buf8, buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torch.nn import Parameter def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) self.beta = Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class Conv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', norm= 'none', sn=True): super(Conv2dLayer, self).__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) if norm == 'bn': self.norm = nn.BatchNorm2d(out_channels) elif norm == 'in': self.norm = nn.InstanceNorm2d(out_channels) elif norm == 'ln': self.norm = LayerNorm(out_channels) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigmoid() elif activation == 'none': 
self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation, bias=False)) else: self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation, bias=False) def forward(self, x): x = self.pad(x) x = self.conv2d(x) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x class ResidualDenseBlock_3C(nn.Module): def __init__(self, in_channels, latent_channels, kernel_size=3, stride= 1, padding=1, dilation=1, pad_type='zero', activation='lrelu', norm ='none', sn=False): super(ResidualDenseBlock_3C, self).__init__() self.conv1 = Conv2dLayer(in_channels, latent_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) self.conv2 = Conv2dLayer(in_channels + latent_channels, latent_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) self.conv3 = Conv2dLayer(in_channels + latent_channels * 2, in_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) def forward(self, x): residual = x x1 = self.conv1(x) x2 = self.conv2(torch.cat((x, x1), 1)) x3 = self.conv3(torch.cat((x, x1, x2), 1)) x3 = 0.1 * x3 + residual return x3 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'latent_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_cat_constant_pad_nd_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 % 8 x3 = xindex // 288 x6 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = x2 tmp13 = tmp11 < tmp3 tmp14 = tmp13 & tmp10 tmp15 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2 + 64 * x3), tmp14 & xmask, other=0.0) tmp16 = tmp11 >= tmp3 tl.full([1], 8, tl.int64) tmp19 = tmp16 & tmp10 tmp20 = tl.load(in_ptr1 + (-5 + x0 + 4 * x1 + 16 * (-4 + x2) + 64 * x3), tmp19 & xmask, other=0.0) tmp21 = 0.0 tmp22 = tmp20 > tmp21 tmp23 = 0.2 tmp24 = tmp20 * tmp23 tmp25 = tl.where(tmp22, tmp20, tmp24) tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp19, tmp25, tmp26) tmp28 = tl.where(tmp13, tmp15, tmp27) tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp10, tmp28, tmp29) tl.store(out_ptr0 + x6, tmp30, xmask) @triton.jit def triton_poi_fused_cat_constant_pad_nd_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1728 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 % 12 x3 = xindex // 432 x6 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = x2 tmp13 = tmp11 < tmp3 tmp14 = tmp13 & tmp10 tmp15 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2 + 64 * x3), tmp14 & xmask, other=0.0) tmp16 = tmp11 >= tmp3 tmp17 = tl.full([1], 8, tl.int64) tmp18 = tmp11 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tmp19 & tmp10 tmp21 = tl.load(in_ptr1 + (-5 + x0 + 4 * x1 + 16 * (-4 + x2) + 64 * x3), tmp20 & xmask, other=0.0) tmp22 = 0.0 tmp23 = tmp21 > tmp22 tmp24 = 0.2 tmp25 = tmp21 * tmp24 tmp26 = tl.where(tmp23, tmp21, tmp25) tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp20, tmp26, tmp27) tmp29 = tmp11 >= tmp17 tl.full([1], 12, tl.int64) tmp32 = tmp29 & tmp10 tmp33 = tl.load(in_ptr2 + (-5 + x0 + 4 * x1 + 16 * (-8 + x2) + 64 * x3), tmp32 & 
xmask, other=0.0) tmp34 = tmp33 > tmp22 tmp35 = tmp33 * tmp24 tmp36 = tl.where(tmp34, tmp33, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp32, tmp36, tmp37) tmp39 = tl.where(tmp19, tmp28, tmp38) tmp40 = tl.where(tmp13, tmp15, tmp39) tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype) tmp42 = tl.where(tmp10, tmp40, tmp41) tl.store(out_ptr0 + x6, tmp42, xmask) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp8 = tl.load(in_ptr1 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = 0.1 tmp7 = tmp5 * tmp6 tmp9 = tmp7 + tmp8 tmp10 = tmp5 > tmp1 tl.store(out_ptr0 + x0, tmp9, xmask) tl.store(out_ptr1 + x0, tmp10, xmask) @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = tmp5 > tmp1 tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_4, (4, 12, 3, 3), (108, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(576)](primals_1, buf0, 576, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 8, 6, 6), (288, 36, 6, 1), torch.float32) triton_poi_fused_cat_constant_pad_nd_1[grid(1152)](primals_1, buf1, buf2, 1152, XBLOCK=128, num_warps=4, num_stages=1) buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 12, 6, 6), (432, 36, 6, 1), torch.float32 ) triton_poi_fused_cat_constant_pad_nd_2[grid(1728)](primals_1, buf1, buf3, buf4, 1728, XBLOCK=128, num_warps=4, num_stages=1) buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1)) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_3[grid(256)]( buf5, primals_1, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf5 del primals_1 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_leaky_relu_backward_4[grid(256)](buf3, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 
4, 1), torch.bool) triton_poi_fused_leaky_relu_leaky_relu_backward_4[grid(256)](buf1, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 return (buf6, primals_2, primals_3, primals_4, buf0, buf2, buf4, buf7, buf8, buf9) def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) self.beta = Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class Conv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', norm= 'none', sn=True): super(Conv2dLayer, self).__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) if norm == 'bn': self.norm = nn.BatchNorm2d(out_channels) elif norm == 'in': self.norm = nn.InstanceNorm2d(out_channels) elif norm == 'ln': self.norm = LayerNorm(out_channels) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = 
nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigmoid() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation, bias=False)) else: self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation, bias=False) def forward(self, x): x = self.pad(x) x = self.conv2d(x) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x class ResidualDenseBlock_3CNew(nn.Module): def __init__(self, in_channels, latent_channels, kernel_size=3, stride= 1, padding=1, dilation=1, pad_type='zero', activation='lrelu', norm ='none', sn=False): super(ResidualDenseBlock_3CNew, self).__init__() self.conv1 = Conv2dLayer(in_channels, latent_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) self.conv2 = Conv2dLayer(in_channels + latent_channels, latent_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) self.conv3 = Conv2dLayer(in_channels + latent_channels * 2, in_channels, kernel_size, stride, padding, dilation, pad_type, activation, norm, sn) def forward(self, input_0): primals_2 = self.conv1.conv2d.weight primals_3 = self.conv2.conv2d.weight primals_4 = self.conv3.conv2d.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
repo_name: LenKerr/Semantic-Colorization-GAN
module_name: ResidualDenseBlock_3C
synthetic: false
uuid: 5536
licenses: ["MIT"]
stars: 1
sha: 2ce52406ca6fc92e69692b451b1c9ae66ba3b76f
repo_link: https://github.com/LenKerr/Semantic-Colorization-GAN/tree/2ce52406ca6fc92e69692b451b1c9ae66ba3b76f
import torch import torch.nn as nn from torch.nn import Parameter def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super().__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) self.beta = Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super().__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class Conv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', norm= 'none', sn=True): super().__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) if norm == 'bn': self.norm = nn.BatchNorm2d(out_channels) elif norm == 'in': self.norm = nn.InstanceNorm2d(out_channels) elif norm == 'ln': self.norm = LayerNorm(out_channels) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.acti # ... truncated (>4000 chars) for memory efficiency
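The ResidualDenseBlock_3C record above shows Inductor fusing the zero padding, channel concatenation, and LeakyReLU into the cat kernels, leaving only the convolutions to extern_kernels.convolution. A minimal eager-mode sketch of the same dense-residual pattern (illustrative only: TinyRDB and its sizes are mine, and padding=1 stands in for the explicit constant_pad_nd):

import torch
import torch.nn as nn

class TinyRDB(nn.Module):
    """Dense-residual block: each conv sees the input plus all earlier features."""
    def __init__(self, channels=4, latent=4):
        super().__init__()
        self.c1 = nn.Conv2d(channels, latent, 3, padding=1, bias=False)
        self.c2 = nn.Conv2d(channels + latent, latent, 3, padding=1, bias=False)
        self.c3 = nn.Conv2d(channels + 2 * latent, channels, 3, padding=1, bias=False)
        self.act = nn.LeakyReLU(0.2)

    def forward(self, x):
        x1 = self.act(self.c1(x))                          # conv1 on x
        x2 = self.act(self.c2(torch.cat((x, x1), 1)))      # conv2 on [x, x1]
        x3 = self.act(self.c3(torch.cat((x, x1, x2), 1)))  # conv3 on [x, x1, x2]
        return 0.1 * x3 + x                                # scaled local residual

y = TinyRDB()(torch.rand(4, 4, 4, 4))
assert y.shape == (4, 4, 4, 4)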
entry_point: KMaxPool1d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/qf/cqfhskey3zun6jxe3jyn6ntmnpvxnpmqob4lhxlqwvyiiesmz5gl.py # Topologically Sorted Source Nodes: [sort, gather], Original ATen: [aten.sort, aten.gather] # Source node to ATen node mapping: # gather => gather # sort => sort # Graph fragment: # %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%getitem_1, 2), kwargs = {}) # %gather : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%arg0_1, 2, %getitem_2), kwargs = {}) triton_per_fused_gather_sort_0 = async_compile.triton('triton_per_fused_gather_sort_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 4], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_gather_sort_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_gather_sort_0(in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (4*x0)), xmask, other=0.0) tmp1 = r1 tmp2 = tmp1.to(tl.int16) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5, tmp6, = triton_helpers.sort_with_index(tmp3, tmp4, None, 1, stable=False, descending=False) tmp7 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp8 = tmp5 + tmp7 tmp9 = tmp5 < 0 tmp10 = tl.where(tmp9, tmp8, tmp5) tl.device_assert(((0 <= tmp10) & (tmp10 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp10 < 4") tmp12 = tl.load(in_ptr1 + (tmp10 + (4*x0)), xmask, eviction_policy='evict_last') tl.store(out_ptr1 + (r1 + (4*x0)), tmp12, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [topk], Original ATen: [aten.topk] buf0 = torch.ops.aten.topk.default(arg0_1, 4, 2) buf2 = buf0[1] del buf0 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sort, gather], Original ATen: [aten.sort, aten.gather] stream0 = get_raw_stream(0) triton_per_fused_gather_sort_0.run(buf2, arg0_1, buf5, 16, 4, grid=grid(16), stream=stream0) del arg0_1 del buf2 return (buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class KMaxPool1d(nn.Module): def __init__(self, top_k: 'int'): super(KMaxPool1d, self).__init__() self.top_k = top_k def forward(self, inputs): assert inputs.dim() == 3 top_idxs = torch.topk(inputs, k=self.top_k, dim=2)[1] sorted_top_idxs = top_idxs.sort(dim=2)[0] return inputs.gather(dim=2, index=sorted_top_idxs) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'top_k': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_gather_sort_0(in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0) tmp1 = r1 tmp2 = tmp1.to(tl.int16) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5, _tmp6 = triton_helpers.sort_with_index(tmp3, tmp4, None, 1, stable=False, descending=False) tmp7 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp8 = tmp5 + tmp7 tmp9 = tmp5 < 0 tmp10 = tl.where(tmp9, tmp8, tmp5) tl.device_assert((0 <= tmp10) & (tmp10 < 4) | ~xmask, 'index out of bounds: 0 <= tmp10 < 4') tmp12 = tl.load(in_ptr1 + (tmp10 + 4 * x0), xmask, eviction_policy= 'evict_last') tl.store(out_ptr1 + (r1 + 4 * x0), tmp12, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten.topk.default(arg0_1, 4, 2) buf2 = buf0[1] del buf0 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_gather_sort_0[grid(16)](buf2, arg0_1, buf5, 16, 4, XBLOCK=8, num_warps=2, num_stages=1) del arg0_1 del buf2 return buf5, class KMaxPool1dNew(nn.Module): def __init__(self, top_k: 'int'): super(KMaxPool1dNew, self).__init__() self.top_k = top_k def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
repo_name: LindgeW/DomainAdaption4DependencyParsing
module_name: KMaxPool1d
synthetic: false
uuid: 5537
licenses: ["Apache-2.0"]
stars: 1
sha: 5de136a37d8fe730e4235ed95bf923763fe21ea6
repo_link: https://github.com/LindgeW/DomainAdaption4DependencyParsing/tree/5de136a37d8fe730e4235ed95bf923763fe21ea6
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, top_k: 'int'): super().__init__() self.top_k = top_k def forward(self, inputs): assert inputs.dim() == 3 top_idxs = torch.topk(inputs, k=self.top_k, dim=2)[1] sorted_top_idxs = top_idxs.sort(dim=2)[0] return inputs.gather(dim=2, index=sorted_top_idxs) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [4]
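A quick numeric check (mine, not part of the record) of why KMaxPool1d sorts the top-k indices before the gather: topk orders positions by value, and the sort restores left-to-right order so the k largest entries keep their original relative sequence:

import torch

x = torch.tensor([[[0.1, 0.9, 0.4, 0.7]]])   # (batch=1, channels=1, length=4)
idx = torch.topk(x, k=2, dim=2)[1]           # value-ordered positions: [[[1, 3]]]
idx = idx.sort(dim=2)[0]                     # positional order (needed in general)
out = x.gather(dim=2, index=idx)
assert torch.equal(out, torch.tensor([[[0.9, 0.7]]]))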
entry_point: Biaffine
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/bc/cbcett6ey62xkijoadrmiqwnmvdqa242vrdqwxiw4pvecwqjoged.py # Topologically Sorted Source Nodes: [input1], Original ATen: [aten.cat] # Source node to ATen node mapping: # input1 => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %full_default], -1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = (xindex // 5) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', 
other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 5, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = 1.0 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp6, tmp9, tmp10) tmp12 = tl.where(tmp4, tmp5, tmp11) tl.store(out_ptr0 + (x2), tmp12, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (5, 5), (5, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [input1], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, buf0, 80, grid=grid(80), stream=stream0) del primals_1 buf1 = empty_strided_cuda((16, 5), (5, 1), torch.float32) # Topologically Sorted Source Nodes: [affine], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (16, 5), (5, 1), 0), reinterpret_tensor(primals_3, (5, 5), (1, 5), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [input2], Original ATen: [aten.cat] triton_poi_fused_cat_0.run(primals_2, buf2, 80, grid=grid(80), stream=stream0) del primals_2 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 5), (20, 5, 1), 0), reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0), out=buf3) del buf1 return (reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf0, (16, 5), (5, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((5, 5), (5, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Biaffine(nn.Module): def __init__(self, in_features, out_features=1, bias=(True, True)): super(Biaffine, self).__init__() self.in_features = in_features self.out_features = out_features self.bias = bias self.linear_input_size = in_features + bias[0] self.linear_output_size = out_features * (in_features + bias[1]) self.linear = nn.Linear(in_features=self.linear_input_size, out_features=self.linear_output_size, bias=False) self.reset_parameters() @property def weight(self): return self.linear.weight def reset_parameters(self): nn.init.xavier_uniform_(self.linear.weight) def forward(self, input1, input2): batch_size, len1, _dim1 = input1.size() batch_size, len2, _dim2 = input2.size() if self.bias[0]: ones = input1.data.new_ones(batch_size, len1, 1) input1 = torch.cat((input1, ones), dim=-1) if self.bias[1]: ones = input2.data.new_ones(batch_size, len2, 1) input2 = torch.cat((input2, ones), dim=-1) affine = self.linear(input1) affine = affine.reshape(batch_size, len1 * self.out_features, -1) biaffine = torch.bmm(affine, input2.transpose(1, 2)).transpose(1, 2) biaffine = biaffine.reshape((batch_size, len2, len1, -1)).squeeze(-1) return biaffine def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 5, tl.int64) tmp9 = 1.0 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp6, tmp9, tmp10) tmp12 = tl.where(tmp4, tmp5, tmp11) tl.store(out_ptr0 + x2, tmp12, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (5, 5), (5, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(80)](primals_1, buf0, 80, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 5), (5, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 5), (5, 1), 0), reinterpret_tensor(primals_3, (5, 5), (1, 5), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32) triton_poi_fused_cat_0[grid(80)](primals_2, buf2, 80, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 5), (20, 5, 1), 0), reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0), out=buf3) del buf1 return reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf0, (16, 5), (5, 1), 0), buf2 class BiaffineNew(nn.Module): def __init__(self, in_features, out_features=1, bias=(True, True)): super(BiaffineNew, self).__init__() self.in_features = in_features self.out_features = out_features self.bias = bias self.linear_input_size = in_features + bias[0] self.linear_output_size = out_features * (in_features + bias[1]) self.linear = nn.Linear(in_features=self.linear_input_size, out_features=self.linear_output_size, bias=False) self.reset_parameters() @property def weight(self): return self.linear.weight def reset_parameters(self): nn.init.xavier_uniform_(self.linear.weight) def forward(self, input_0, input_1): primals_3 = self.linear.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
repo_name: LindgeW/DomainAdaption4DependencyParsing
module_name: Biaffine
synthetic: false
uuid: 5538
licenses: ["Apache-2.0"]
stars: 1
sha: 5de136a37d8fe730e4235ed95bf923763fe21ea6
repo_link: https://github.com/LindgeW/DomainAdaption4DependencyParsing/tree/5de136a37d8fe730e4235ed95bf923763fe21ea6
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, in_features, out_features=1, bias=(True, True)): super().__init__() self.in_features = in_features self.out_features = out_features self.bias = bias self.linear_input_size = in_features + bias[0] self.linear_output_size = out_features * (in_features + bias[1]) self.linear = nn.Linear(in_features=self.linear_input_size, out_features=self.linear_output_size, bias=False) self.reset_parameters() @property def weight(self): return self.linear.weight def reset_parameters(self): nn.init.xavier_uniform_(self.linear.weight) def forward(self, input1, input2): batch_size, len1, _dim1 = input1.size() batch_size, len2, _dim2 = input2.size() if self.bias[0]: ones = input1.data.new_ones(batch_size, len1, 1) input1 = torch.cat((input1, ones), dim=-1) if self.bias[1]: ones = input2.data.new_ones(batch_size, len2, 1) input2 = torch.cat((input2, ones), dim=-1) affine = self.linear(input1) affine = affine.reshape(batch_size, len1 * self.out_features, -1) biaffine = torch.bmm(affine, input2.transpose(1, 2)).transpose(1, 2) biaffine = biaffine.reshape((batch_size, len2, len1, -1)).squeeze(-1) return biaffine def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [4]
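The Biaffine record reduces to two cat kernels plus an mm and a bmm. A small equivalence sketch (shapes and names are mine) of what the forward computes with bias=(True, True): every position pair (i, j) is scored by a bilinear form between the bias-augmented vectors [x1_i; 1] and [x2_j; 1]:

import torch

B, L, D = 4, 4, 4
x1, x2 = torch.rand(B, L, D), torch.rand(B, L, D)
W = torch.rand(D + 1, D + 1)                 # linear weight, out = in = D + 1
ones = torch.ones(B, L, 1)
h1 = torch.cat((x1, ones), dim=-1)           # (B, L, D+1), bias-augmented
h2 = torch.cat((x2, ones), dim=-1)
# linear on h1, then batched matmul against h2^T, as in the record's call()
scores = torch.bmm(h1 @ W.t(), h2.transpose(1, 2)).transpose(1, 2)
ref = torch.einsum('bic,cd,bjd->bji', h1, W.t(), h2)
assert torch.allclose(scores, ref, atol=1e-6)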
entry_point: L2Norm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/3i/c3iedxosa453ppy7izskpdsxaah4sgqlvqpbehs3lyv5xhslwtpv.py # Topologically Sorted Source Nodes: [pow_1, sum_1, norm, x, out], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.div, aten.mul] # Source node to ATen node mapping: # norm => sqrt # out => mul # pow_1 => pow_1 # sum_1 => sum_1 # x => div # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %sqrt), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_2), kwargs = {}) triton_poi_fused_div_mul_pow_sqrt_sum_0 = async_compile.triton('triton_poi_fused_div_mul_pow_sqrt_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_pow_sqrt_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tmp15 = tmp13 * tmp14 tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, sum_1, norm, x, out], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_div_mul_pow_sqrt_sum_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_2 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.init as init class L2Norm(nn.Module): def __init__(self, channels, scale): super(L2Norm, self).__init__() self.channels = channels self.scale = scale self.rescale_factors = nn.Parameter(torch.FloatTensor(1, channels, 1, 1)) self.reset_params() def reset_params(self): init.constant_(self.rescale_factors, self.scale) def forward(self, x): norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() x = x / norm out = x * self.rescale_factors return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4, 'scale': 1.0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tmp15 = tmp13 * tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_mul_pow_sqrt_sum_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf0, primals_1 class L2NormNew(nn.Module): def __init__(self, channels, scale): super(L2NormNew, self).__init__() self.channels = channels self.scale = scale self.rescale_factors = nn.Parameter(torch.FloatTensor(1, channels, 1, 1)) self.reset_params() def reset_params(self): init.constant_(self.rescale_factors, self.scale) def forward(self, input_0): primals_2 = self.rescale_factors primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
Liuhongzhi2018/Car_detection
L2Norm
false
5539
[ "MIT" ]
1
f32fea9c348c691ccc30b9804a4f3fa32732bbae
https://github.com/Liuhongzhi2018/Car_detection/tree/f32fea9c348c691ccc30b9804a4f3fa32732bbae
import torch import torch.nn as nn import torch.nn.init as init class Model(nn.Module): def __init__(self, channels, scale): super().__init__() self.channels = channels self.scale = scale self.rescale_factors = nn.Parameter(torch.FloatTensor(1, channels, 1, 1)) self.reset_params() def reset_params(self): init.constant_(self.rescale_factors, self.scale) def forward(self, x): norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() x = x / norm out = x * self.rescale_factors return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 1.0]
PositionWiseFeedForward
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/zy/czylxf6rfbnbz2ddgd3xovxwjqnfen7sgqej5mnv46j2fekwnniz.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # output => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, 
tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/mx/cmxyblvny6ryx4zft5bojylqtaa5rz2epkem2quffgbnvnc2z4fy.py # Topologically Sorted Source Nodes: [add, mean, std], Original ATen: [aten.add, aten.mean, aten.std] # Source node to ATen node mapping: # add => add # mean => mean # std => var # Graph fragment: # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%add, [-1]), kwargs = {correction: 1.0, keepdim: True}) triton_poi_fused_add_mean_std_1 = async_compile.triton('triton_poi_fused_add_mean_std_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_std_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mean_std_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 
tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = 3.0 tmp29 = tmp27 / tmp28 tl.store(in_out_ptr0 + (x0), tmp29, xmask) tl.store(out_ptr0 + (x0), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/da/cdabe3pqqspscvgqgnm3vaiwdzwvoew5ch5fapm37mrjq7zo4e55.py # Topologically Sorted Source Nodes: [add, mean, std, sub, add_1, ln_out, mul, ln_out_1], Original ATen: [aten.add, aten.mean, aten.std, aten.sub, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # add_1 => add_1 # ln_out => div # ln_out_1 => add_2 # mean => mean # mul => mul # std => sqrt # sub => sub # Graph fragment: # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_6, %div), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_7), kwargs = {}) triton_poi_fused_add_div_mean_mul_std_sub_2 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x2), xmask) tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp7 = libdevice.sqrt(tmp6) tmp8 = 1e-06 tmp9 = tmp7 + tmp8 tmp10 = tmp5 / tmp9 tmp11 = tmp0 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2048, 4), (4, 1)) assert_size_stride(primals_3, (2048, ), (1, )) assert_size_stride(primals_4, (4, 2048), (2048, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2048), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2048), (32768, 8192, 2048, 1), 0); del buf0 # reuse buf7 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1), torch.bool) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf7, 131072, grid=grid(131072), stream=stream0) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0), reinterpret_tensor(primals_4, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = buf3; del buf3 # reuse buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [add, mean, std], Original ATen: [aten.add, aten.mean, aten.std] triton_poi_fused_add_mean_std_1.run(buf4, buf2, primals_1, buf5, 64, grid=grid(64), stream=stream0) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, mean, std, sub, add_1, ln_out, mul, ln_out_1], Original ATen: [aten.add, aten.mean, aten.std, aten.sub, aten.div, aten.mul] triton_poi_fused_add_div_mean_mul_std_sub_2.run(primals_6, buf2, primals_1, buf5, buf4, primals_7, buf6, 256, grid=grid(256), stream=stream0) del buf4 del buf5 del primals_7 return (buf6, primals_1, primals_6, reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0), buf2, primals_4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) 
primals_4 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.init as init class LayerNorm(nn.Module): def __init__(self, d_hid, eps=1e-06): super(LayerNorm, self).__init__() self.gamma = nn.Parameter(torch.ones(d_hid)) self.beta = nn.Parameter(torch.zeros(d_hid)) self.eps = eps def forward(self, x): mean = x.mean(dim=-1, keepdim=True) std = x.std(dim=-1, keepdim=True) ln_out = (x - mean) / (std + self.eps) ln_out = self.gamma * ln_out + self.beta return ln_out class Linear(nn.Module): def __init__(self, in_features, out_features, bias=True): super(Linear, self).__init__() self.linear = nn.Linear(in_features, out_features, bias=bias) init.xavier_normal_(self.linear.weight) init.zeros_(self.linear.bias) def forward(self, inputs): return self.linear(inputs) class PositionWiseFeedForward(nn.Module): def __init__(self, d_model, d_ff=2048, dropout=0.1): super(PositionWiseFeedForward, self).__init__() self.relu = nn.ReLU() self.fc1 = Linear(d_model, d_ff) self.fc2 = Linear(d_ff, d_model) self.dropout = nn.Dropout(dropout) self.layernorm = LayerNorm(d_model) def forward(self, x): residual = x output = self.relu(self.fc1(x)) output = self.dropout(self.fc2(output)) output = self.layernorm(output + residual) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_add_mean_std_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = 3.0 tmp29 = tmp27 / tmp28 tl.store(in_out_ptr0 + x0, tmp29, xmask) tl.store(out_ptr0 + x0, tmp16, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x2, xmask) tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp7 = libdevice.sqrt(tmp6) tmp8 = 1e-06 tmp9 = tmp7 + tmp8 tmp10 = tmp5 / tmp9 tmp11 = tmp0 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 
16, 4, 1)) assert_size_stride(primals_2, (2048, 4), (4, 1)) assert_size_stride(primals_3, (2048,), (1,)) assert_size_stride(primals_4, (4, 2048), (2048, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2048), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2048), (32768, 8192, 2048, 1), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(131072)](buf1, primals_3, buf7, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0), reinterpret_tensor(primals_4, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_add_mean_std_1[grid(64)](buf4, buf2, primals_1, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_std_sub_2[grid(256)](primals_6, buf2, primals_1, buf5, buf4, primals_7, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf4 del buf5 del primals_7 return buf6, primals_1, primals_6, reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0), buf2, primals_4, buf7 class LayerNorm(nn.Module): def __init__(self, d_hid, eps=1e-06): super(LayerNorm, self).__init__() self.gamma = nn.Parameter(torch.ones(d_hid)) self.beta = nn.Parameter(torch.zeros(d_hid)) self.eps = eps def forward(self, x): mean = x.mean(dim=-1, keepdim=True) std = x.std(dim=-1, keepdim=True) ln_out = (x - mean) / (std + self.eps) ln_out = self.gamma * ln_out + self.beta return ln_out class Linear(nn.Module): def __init__(self, in_features, out_features, bias=True): super(Linear, self).__init__() self.linear = nn.Linear(in_features, out_features, bias=bias) init.xavier_normal_(self.linear.weight) init.zeros_(self.linear.bias) def forward(self, inputs): return self.linear(inputs) class PositionWiseFeedForwardNew(nn.Module): def __init__(self, d_model, d_ff=2048, dropout=0.1): super(PositionWiseFeedForwardNew, self).__init__() self.relu = nn.ReLU() self.fc1 = Linear(d_model, d_ff) self.fc2 = Linear(d_ff, d_model) self.dropout = nn.Dropout(dropout) self.layernorm = LayerNorm(d_model) def forward(self, input_0): primals_2 = self.fc1.linear.weight primals_3 = self.fc1.linear.bias primals_4 = self.fc2.linear.weight primals_5 = self.fc2.linear.bias primals_6 = self.layernorm.gamma primals_7 = self.layernorm.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Lhx94As/PHO-LID
PositionWiseFeedForward
false
5,540
[ "MIT" ]
1
44843b25b977dd6e0b77b520dbe3f2ff1ea633cd
https://github.com/Lhx94As/PHO-LID/tree/44843b25b977dd6e0b77b520dbe3f2ff1ea633cd
import torch import torch.nn as nn import torch.nn.init as init class LayerNorm(nn.Module): def __init__(self, d_hid, eps=1e-06): super().__init__() self.gamma = nn.Parameter(torch.ones(d_hid)) self.beta = nn.Parameter(torch.zeros(d_hid)) self.eps = eps def forward(self, x): mean = x.mean(dim=-1, keepdim=True) std = x.std(dim=-1, keepdim=True) ln_out = (x - mean) / (std + self.eps) ln_out = self.gamma * ln_out + self.beta return ln_out class Linear(nn.Module): def __init__(self, in_features, out_features, bias=True): super().__init__() self.linear = nn.Linear(in_features, out_features, bias=bias) init.xavier_normal_(self.linear.weight) init.zeros_(self.linear.bias) def forward(self, inputs): return self.linear(inputs) class Model(nn.Module): def __init__(self, d_model, d_ff=2048, dropout=0.1): super().__init__() self.relu = nn.ReLU() self.fc1 = Linear(d_model, d_ff) self.fc2 = Linear(d_ff, d_model) self.dropout = nn.Dropout(dropout) self.layernorm = LayerNorm(d_model) def forward(self, x): residual = x output = self.relu(self.fc1(x)) output = self.dropout(self.fc2(output)) output = self.layernorm(output + residual) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
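Worth noting, as a sketch rather than generated output: the custom LayerNorm here uses torch.Tensor.std, i.e. Bessel-corrected variance (the division by 3.0 in triton_poi_fused_add_mean_std_1 for n = 4) and adds eps outside the square root, so it is not interchangeable with nn.LayerNorm. A plain-PyTorch restatement of what the two fused kernels compute:

import torch

def layernorm_reference(x, gamma, beta, eps=1e-06):
    # mean over the last dim, unbiased std, eps added outside sqrt
    mean = x.mean(dim=-1, keepdim=True)
    std = x.std(dim=-1, keepdim=True)  # divides by n - 1, matching tmp27 / 3.0 above
    return gamma * (x - mean) / (std + eps) + beta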
VAE
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/iv/civ5tj3syvyx4mu3v7dhhyywq75qhwo5ijklk46fa3gmvy43dnjf.py # Topologically Sorted Source Nodes: [mul, std, mul_1, z_1, add_1, pow_1, sub, exp_1, sub_1, sum_1, mul_2, kl_loss], Original ATen: [aten.mul, aten.exp, aten.add, aten.pow, aten.sub, aten.sum, aten.div] # Source node to ATen node mapping: # add_1 => add_1 # exp_1 => exp_1 # kl_loss => div # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # pow_1 => pow_1 # std => exp # sub => sub # sub_1 => sub_1 # sum_1 => sum_1 # z_1 => add # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 0.5), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%randn, %exp), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %view_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, 1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %pow_1), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%view_3,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %exp_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_1,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, -0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_2, 4), kwargs = {}) triton_per_fused_add_div_exp_mul_pow_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_exp_mul_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import 
AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {6: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=(6,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_exp_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_exp_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex % 16 r2 = rindex tmp0 = tl.load(in_ptr0 + (r0), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (r2), None) tmp6 = tl.load(in_ptr2 + (r2), None) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tl_math.exp(tmp3) tmp5 = tmp0 * tmp4 tmp7 = tmp5 + tmp6 tmp8 = tl_math.exp(tmp1) tmp9 = 1.0 tmp10 = tmp1 + tmp9 tmp11 = tmp6 * tmp6 tmp12 = tmp10 - tmp11 tmp13 = tmp12 - tmp8 tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = -0.5 tmp18 = tmp16 * tmp17 tmp19 = 0.25 tmp20 = tmp18 * tmp19 tl.store(out_ptr0 + (tl.broadcast_to(r2, [RBLOCK])), tmp7, None) tl.store(out_ptr1 + (tl.broadcast_to(r2, [RBLOCK])), tmp8, None) tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp20, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/qx/cqxtiuijob6zwtx2fbrdeze2bxnzigjok4rc5o4e7udmnr4hlmi7.py # Topologically Sorted Source Nodes: [dist_loss], Original ATen: [aten.mse_loss] # Source node to ATen node mapping: # dist_loss => mean, pow_2, sub_2 # Graph fragment: # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %primals_3), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_2,), kwargs = {}) triton_per_fused_mse_loss_1 = async_compile.triton('triton_per_fused_mse_loss_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( 
size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mse_loss_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mse_loss_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp8, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [logv], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 # Topologically Sorted Source Nodes: [z], Original ATen: [aten.randn] buf2 = torch.ops.aten.randn.default([4, 4], device=device(type='cuda', index=0), pin_memory=False) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf7 = empty_strided_cuda((), (), torch.float32) buf10 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [mul, std, mul_1, z_1, add_1, pow_1, sub, exp_1, sub_1, sum_1, mul_2, kl_loss], Original ATen: [aten.mul, aten.exp, aten.add, aten.pow, 
aten.sub, aten.sum, aten.div] stream0 = get_raw_stream(0) triton_per_fused_add_div_exp_mul_pow_sub_sum_0.run(buf10, buf3, buf1, buf0, buf4, buf6, 1, 256, grid=grid(1), stream=stream0) buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [restruct_hidden], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_7 buf8 = empty_strided_cuda((), (), torch.float32) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [dist_loss], Original ATen: [aten.mse_loss] triton_per_fused_mse_loss_1.run(buf9, buf5, primals_3, 1, 256, grid=grid(1), stream=stream0) return (buf9, buf10, primals_3, buf0, buf1, buf3, reinterpret_tensor(buf4, (64, 4), (4, 1), 0), buf5, buf6, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class VAE(nn.Module): def __init__(self, input_dim, latent_dim): super(VAE, self).__init__() self.latent_dim = latent_dim self.hidden2mean = nn.Linear(input_dim, latent_dim) self.hidden2logv = nn.Linear(input_dim, latent_dim) self.latent2hidden = nn.Linear(latent_dim, input_dim) self.reset_params() def reset_params(self): nn.init.xavier_uniform_(self.hidden2mean.weight) nn.init.xavier_uniform_(self.hidden2logv.weight) nn.init.xavier_uniform_(self.latent2hidden.weight) def forward(self, h): mean = self.hidden2mean(h) logv = self.hidden2logv(h) std = torch.exp(0.5 * logv) z = torch.randn((h.size(0), self.latent_dim), device=h.device) z = z * std + mean restruct_hidden = self.latent2hidden(z) kl_loss = -0.5 * torch.sum(1 + logv - mean.pow(2) - logv.exp() ) / logv.size(0) dist_loss = F.mse_loss(restruct_hidden, h) return dist_loss, kl_loss def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'latent_dim': 4}]
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_div_exp_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex % 16 r2 = rindex tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + r2, None) tmp6 = tl.load(in_ptr2 + r2, None) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tl_math.exp(tmp3) tmp5 = tmp0 * tmp4 tmp7 = tmp5 + tmp6 tmp8 = tl_math.exp(tmp1) tmp9 = 1.0 tmp10 = tmp1 + tmp9 tmp11 = tmp6 * tmp6 tmp12 = tmp10 - tmp11 tmp13 = tmp12 - tmp8 tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = -0.5 tmp18 = tmp16 * tmp17 tmp19 = 0.25 tmp20 = tmp18 * tmp19 tl.store(out_ptr0 + tl.broadcast_to(r2, [RBLOCK]), tmp7, None) tl.store(out_ptr1 + tl.broadcast_to(r2, [RBLOCK]), tmp8, None) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None) @triton.jit def triton_per_fused_mse_loss_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = torch.ops.aten.randn.default([4, 4], device=device(type= 'cuda', index=0), pin_memory=False) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = 
empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf7 = empty_strided_cuda((), (), torch.float32) buf10 = buf7 del buf7 get_raw_stream(0) triton_per_fused_add_div_exp_mul_pow_sub_sum_0[grid(1)](buf10, buf3, buf1, buf0, buf4, buf6, 1, 256, num_warps=2, num_stages=1) buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf4, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_7 buf8 = empty_strided_cuda((), (), torch.float32) buf9 = buf8 del buf8 triton_per_fused_mse_loss_1[grid(1)](buf9, buf5, primals_3, 1, 256, num_warps=2, num_stages=1) return buf9, buf10, primals_3, buf0, buf1, buf3, reinterpret_tensor(buf4, (64, 4), (4, 1), 0), buf5, buf6, primals_6 class VAENew(nn.Module): def __init__(self, input_dim, latent_dim): super(VAENew, self).__init__() self.latent_dim = latent_dim self.hidden2mean = nn.Linear(input_dim, latent_dim) self.hidden2logv = nn.Linear(input_dim, latent_dim) self.latent2hidden = nn.Linear(latent_dim, input_dim) self.reset_params() def reset_params(self): nn.init.xavier_uniform_(self.hidden2mean.weight) nn.init.xavier_uniform_(self.hidden2logv.weight) nn.init.xavier_uniform_(self.latent2hidden.weight) def forward(self, input_0): primals_1 = self.hidden2mean.weight primals_2 = self.hidden2mean.bias primals_4 = self.hidden2logv.weight primals_5 = self.hidden2logv.bias primals_6 = self.latent2hidden.weight primals_7 = self.latent2hidden.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
LindgeW/DomainAdaption4DependencyParsing
VAE
false
5541
[ "Apache-2.0" ]
1
5de136a37d8fe730e4235ed95bf923763fe21ea6
https://github.com/LindgeW/DomainAdaption4DependencyParsing/tree/5de136a37d8fe730e4235ed95bf923763fe21ea6
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, input_dim, latent_dim): super().__init__() self.latent_dim = latent_dim self.hidden2mean = nn.Linear(input_dim, latent_dim) self.hidden2logv = nn.Linear(input_dim, latent_dim) self.latent2hidden = nn.Linear(latent_dim, input_dim) self.reset_params() def reset_params(self): nn.init.xavier_uniform_(self.hidden2mean.weight) nn.init.xavier_uniform_(self.hidden2logv.weight) nn.init.xavier_uniform_(self.latent2hidden.weight) def forward(self, h): mean = self.hidden2mean(h) logv = self.hidden2logv(h) std = torch.exp(0.5 * logv) z = torch.randn((h.size(0), self.latent_dim), device=h.device) z = z * std + mean restruct_hidden = self.latent2hidden(z) kl_loss = -0.5 * torch.sum(1 + logv - mean.pow(2) - logv.exp() ) / logv.size(0) dist_loss = F.mse_loss(restruct_hidden, h) return dist_loss, kl_loss def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
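As a hedged restatement for illustration (not emitted by the compiler): triton_per_fused_add_div_exp_mul_pow_sub_sum_0 fuses the reparameterization trick and the KL term into a single 256-element pass, with the (4, 4) noise broadcast against the trailing dims of the (4, 4, 4, 4) mean/logv (the r0 = rindex % 16 load) and the 0.25 factor corresponding to logv.size(0) = 4.

import torch

def vae_reference(mean, logv, noise):
    # z = eps * std + mu (tmp7 in the kernel); noise broadcasts over the leading dims
    z = noise * torch.exp(0.5 * logv) + mean
    # KL(q(z|h) || N(0, I)) summed over all elements, then divided by the batch size
    kl = -0.5 * torch.sum(1 + logv - mean.pow(2) - logv.exp()) / logv.size(0)
    return z, kl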
BiaffineAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/3l/c3lo77c7wjxasxrhtr6wesb72ods2d2rxnxhbfieun7j2wukm3wn.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_4, %primals_3], -1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/bd/cbd5vrzay2jbzmubtgdwxlp37vuwvz6hkpnirlmnp5x7bwlay5gq.py # Topologically Sorted Source Nodes: [att_weigths], Original ATen: [aten._softmax] # Source node to ATen node mapping: # att_weigths => amax, clone, exp, sub, sum_1 # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask) tmp5 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp7 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask) tmp10 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp12 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask) tmp15 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp17 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp5 + tmp1 tmp8 = tmp6 + 
tmp7 tmp9 = triton_helpers.maximum(tmp4, tmp8) tmp11 = tmp10 + tmp1 tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp9, tmp13) tmp16 = tmp15 + tmp1 tmp18 = tmp16 + tmp17 tmp19 = triton_helpers.maximum(tmp14, tmp18) tmp20 = tmp4 - tmp19 tmp21 = tl_math.exp(tmp20) tmp22 = tmp8 - tmp19 tmp23 = tl_math.exp(tmp22) tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp19 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tmp18 - tmp19 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tl.store(out_ptr0 + (x2), tmp19, xmask) tl.store(out_ptr1 + (x2), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/dr/cdrn6bnobxdzhbn4umg6cz5cycfofs62kppoaiycjgik6nkueiql.py # Topologically Sorted Source Nodes: [att_weigths], Original ATen: [aten._softmax] # Source node to ATen node mapping: # att_weigths => clone, div, exp, sub # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y1 = (yindex // 4) y0 = yindex % 4 tmp0 = tl.load(in_ptr0 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x2 + 
(4*y3)), xmask & ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x2 + (4*y1)), xmask & ymask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr4 + (x2 + (4*y1)), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tl.store(out_ptr0 + (y0 + (4*x2) + (16*y1)), tmp9, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [output], Original ATen: [aten._trilinear] buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), primals_1, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_1 buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_4, primals_3, buf2, 128, grid=grid(128), stream=stream0) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf2, (16, 8), (8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), out=buf3) del primals_5 buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [att_weigths], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, primals_2, buf3, buf4, buf5, 16, grid=grid(16), stream=stream0) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [att_weigths], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf1, primals_2, buf3, buf4, buf5, buf6, 16, 4, grid=grid(16, 4), stream=stream0) del buf1 del buf4 del buf5 del primals_2 buf7 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [att_out], Original ATen: [aten.bmm] extern_kernels.bmm(buf6, primals_3, out=buf7) return (buf7, primals_3, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(buf2, (16, 8), (8, 1), 0), buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
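The two softmax kernels above split a numerically stable softmax into two passes: triton_poi_fused__softmax_1 produces the per-row maximum and the sum of shifted exponentials, and triton_poi_fused__softmax_2 re-reads the scores and normalizes. Below is a minimal eager sketch of the same decomposition; two_pass_softmax is a hypothetical helper, not part of the generated code.

import torch

def two_pass_softmax(scores: torch.Tensor) -> torch.Tensor:
    # Pass 1 (cf. triton_poi_fused__softmax_1): per-row max and sum of shifted exponentials.
    row_max = scores.amax(dim=-1, keepdim=True)
    denom = (scores - row_max).exp().sum(dim=-1, keepdim=True)
    # Pass 2 (cf. triton_poi_fused__softmax_2): shift, exponentiate, normalize.
    return (scores - row_max).exp() / denom

x = torch.randn(4, 4, 4)
assert torch.allclose(two_pass_softmax(x), torch.softmax(x, dim=-1), atol=1e-6)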
import torch import torch.nn as nn class BiaffineAttention(nn.Module): def __init__(self, in1_features, in2_features, num_label, bias=True): super(BiaffineAttention, self).__init__() self.bilinear = nn.Bilinear(in1_features, in2_features, num_label, bias=bias) self.linear = nn.Linear(in1_features + in2_features, num_label, bias=False) self.softmax = nn.Softmax(dim=-1) def forward(self, head, dep): """ :param head: [batch, seq_len, hidden] input feature 1, i.e. the label-head :param dep: [batch, seq_len, hidden] input feature 2, i.e. the label-dep :return output: [batch, seq_len, num_cls] per-element probability map over the label classes """ output = self.bilinear(head, dep) biaff_score = output + self.linear(torch.cat((head, dep), dim=-1)) biaff_score = biaff_score.transpose(1, 2) att_weigths = self.softmax(biaff_score) att_out = torch.bmm(att_weigths, dep) return att_out def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in1_features': 4, 'in2_features': 4, 'num_label': 4}]
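A minimal usage sketch for the module above, relying only on the get_inputs/get_init_inputs helpers it ships with; the shape comments assume the toy 4x4x4 inputs those helpers return.

import torch

torch.manual_seed(0)
model = BiaffineAttention(in1_features=4, in2_features=4, num_label=4)
head, dep = get_inputs()          # both [batch=4, seq_len=4, hidden=4]
att_out = model(head, dep)        # bilinear + linear scores -> softmax -> bmm
print(att_out.shape)              # torch.Size([4, 4, 4]) with the toy sizes above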
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp7 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask) tmp10 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp12 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask) tmp15 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp17 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp5 + tmp1 tmp8 = tmp6 + tmp7 tmp9 = triton_helpers.maximum(tmp4, tmp8) tmp11 = tmp10 + tmp1 tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp9, tmp13) tmp16 = tmp15 + tmp1 tmp18 = tmp16 + tmp17 tmp19 = triton_helpers.maximum(tmp14, tmp18) tmp20 = tmp4 - tmp19 tmp21 = tl_math.exp(tmp20) tmp22 = tmp8 - tmp19 tmp23 = tl_math.exp(tmp22) tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp19 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tmp18 - tmp19 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tl.store(out_ptr0 + x2, tmp19, xmask) tl.store(out_ptr1 + x2, tmp30, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y1 = yindex // 4 y0 = yindex % 4 tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr3 + (x2 + 4 * y1), xmask & ymask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr4 + (x2 + 4 * y1), xmask & ymask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 
tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tl.store(out_ptr0 + (y0 + 4 * x2 + 16 * y1), tmp9, xmask & ymask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor( primals_4, (16, 4), (4, 1), 0), primals_1, reinterpret_tensor( primals_3, (16, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_1 buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_4, primals_3, buf2, 128, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 8), (8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), out=buf3) del primals_5 buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf1, primals_2, buf3, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(16, 4)](buf1, primals_2, buf3, buf4, buf5, buf6, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del buf1 del buf4 del buf5 del primals_2 buf7 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0) del buf3 extern_kernels.bmm(buf6, primals_3, out=buf7) return buf7, primals_3, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf2, (16, 8), (8, 1), 0), buf6 class BiaffineAttentionNew(nn.Module): def __init__(self, in1_features, in2_features, num_label, bias=True): super(BiaffineAttentionNew, self).__init__() self.bilinear = nn.Bilinear(in1_features, in2_features, num_label, bias=bias) self.linear = nn.Linear(in1_features + in2_features, num_label, bias=False) self.softmax = nn.Softmax(dim=-1) def forward(self, input_0, input_1): primals_1 = self.bilinear.weight primals_2 = self.bilinear.bias primals_5 = self.linear.weight primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
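A plausible parity check between the eager module and the Inductor-generated wrapper, assuming both class definitions above are in scope together and a CUDA device is available (the generated kernels are CUDA-only). Note that call() routes input_0 to primals_3 and input_1 to primals_4, which the trilinear consumes as (head=primals_4, dep=primals_3), so the eager call below swaps its arguments.

import torch

torch.manual_seed(0)
eager = BiaffineAttention(4, 4, 4).cuda()
compiled = BiaffineAttentionNew(4, 4, 4).cuda()
compiled.load_state_dict(eager.state_dict())  # share weights between the two

x0 = torch.rand(4, 4, 4, device='cuda')
x1 = torch.rand(4, 4, 4, device='cuda')
# input_0 plays the dep role and input_1 the head role in call(), hence (x1, x0):
print(torch.allclose(compiled(x0, x1), eager(x1, x0), atol=1e-5))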
LindgeW/DomainAdaption4DependencyParsing
BiaffineAttention
false
5542
[ "Apache-2.0" ]
1
5de136a37d8fe730e4235ed95bf923763fe21ea6
https://github.com/LindgeW/DomainAdaption4DependencyParsing/tree/5de136a37d8fe730e4235ed95bf923763fe21ea6
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, in1_features, in2_features, num_label, bias=True): super().__init__() self.bilinear = nn.Bilinear(in1_features, in2_features, num_label, bias=bias) self.linear = nn.Linear(in1_features + in2_features, num_label, bias=False) self.softmax = nn.Softmax(dim=-1) def forward(self, head, dep): """ :param head: [batch, seq_len, hidden] input feature 1, i.e. the label-head :param dep: [batch, seq_len, hidden] input feature 2, i.e. the label-dep :return output: [batch, seq_len, num_cls] per-element probability map over the label classes """ output = self.bilinear(head, dep) biaff_score = output + self.linear(torch.cat((head, dep), dim=-1)) biaff_score = biaff_score.transpose(1, 2) att_weigths = self.softmax(biaff_score) att_out = torch.bmm(att_weigths, dep) return att_out def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
FocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/tj/ctjenrmpmnfu2d3pk5t7qu4ih2v4kysuba4mugrps3thgibhpgig.py # Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, mul_2, sub_2, mul_3, add_1, pred_sigmoid, sub, mul, sub_1, mul_1, pt, pow_1, focal_weight, loss, loss_1, loss_cls], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.rsub, aten.add, aten.sigmoid, aten.pow, aten.mean] # Source node to ATen node mapping: # add_1 => add_1 # binary_cross_entropy_with_logits => abs_1, exp, full_default, log1p, minimum, mul_5, neg, sub_3, sub_4, sub_5 # focal_weight => mul_4 # loss => mul_6 # loss_1 => mean # loss_cls => mul_7 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # pow_1 => pow_1 # pred_sigmoid => sigmoid # pt => add # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # Graph fragment: # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %arg0_1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg0_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_5, %sub_4), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 0.25), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_3 : 
[num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, 0.75), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sub_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2.0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %pow_1), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, %mul_4), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_6,), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0 = async_compile.triton('triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, 
tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = 0.25 tmp14 = tmp0 * tmp13 tmp15 = 0.75 tmp16 = tmp2 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tl.sigmoid(tmp3) tmp19 = tmp1 - tmp18 tmp20 = tmp19 * tmp0 tmp21 = tmp18 * tmp2 tmp22 = tmp20 + tmp21 tmp23 = tmp22 * tmp22 tmp24 = tmp17 * tmp23 tmp25 = tmp12 * tmp24 tmp26 = tl.broadcast_to(tmp25, [RBLOCK]) tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0)) tmp29 = 256.0 tmp30 = tmp28 / tmp29 tmp31 = tmp30 * tmp1 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp31, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, mul_2, sub_2, mul_3, add_1, pred_sigmoid, sub, mul, sub_1, mul_1, pt, pow_1, focal_weight, loss, loss_1, loss_cls], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.rsub, aten.add, aten.sigmoid, aten.pow, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
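The fused kernel evaluates binary_cross_entropy_with_logits through the numerically stable identity (1 - t) * x - (min(0, x) - log1p(exp(-|x|))), computed in registers as tmp4 - tmp11 above. A short sketch checking that identity against the PyTorch reference; x and t stand in for arg0_1 (pred) and arg1_1 (target).

import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4)   # logits (arg0_1 / pred)
t = torch.rand(4, 4, 4, 4)    # soft targets (arg1_1 / target)
# torch.clamp(x, max=0) is min(0, x); log1p(exp(-|x|)) keeps the log-sum-exp stable.
stable = (1 - t) * x - (torch.clamp(x, max=0) - torch.log1p(torch.exp(-x.abs())))
reference = F.binary_cross_entropy_with_logits(x, t, reduction='none')
print(torch.allclose(stable, reference, atol=1e-6))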
import torch import torch.nn as nn import torch.nn.functional as F def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def sigmoid_focal_loss(pred, target, weight=None, gamma=2.0, alpha=0.25, reduction='mean', avg_factor=None): """Sigmoid focal loss. Args: pred (torch.Tensor): The prediction with shape (N, \\*). target (torch.Tensor): The ground truth label of the prediction with shape (N, \\*). weight (torch.Tensor, optional): Sample-wise loss weight with shape (N, ). Defaults to None. gamma (float): The gamma for calculating the modulating factor. Defaults to 2.0. alpha (float): A balanced form for Focal Loss. Defaults to 0.25. reduction (str): The method used to reduce the loss. Options are "none", "mean" and "sum". If reduction is 'none', the loss is the same shape as pred and label. Defaults to 'mean'. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. Returns: torch.Tensor: Loss. """ assert pred.shape == target.shape, 'pred and target should have the same shape.' pred_sigmoid = pred.sigmoid() target = target.type_as(pred) pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma ) loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none' ) * focal_weight if weight is not None: assert weight.dim() == 1 weight = weight.float() if pred.dim() > 1: weight = weight.reshape(-1, 1) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss class FocalLoss(nn.Module): """Focal loss. Args: gamma (float): Focusing parameter in focal loss. Defaults to 2.0. alpha (float): The parameter in balanced form of focal loss. Defaults to 0.25. reduction (str): The method used to reduce the loss into a scalar. Options are "none" and "mean". Defaults to 'mean'. loss_weight (float): Weight of loss. Defaults to 1.0. """ def __init__(self, gamma=2.0, alpha=0.25, reduction='mean', loss_weight=1.0 ): super(FocalLoss, self).__init__() self.gamma = gamma self.alpha = alpha self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Sigmoid focal loss. Args: pred (torch.Tensor): The prediction with shape (N, \\*). target (torch.Tensor): The ground truth label of the prediction with shape (N, \\*). weight (torch.Tensor, optional): Sample-wise loss weight with shape (N, \\*). Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None.
reduction_override (str, optional): The method used to reduce the loss into a scalar. Options are "none", "mean" and "sum". Defaults to None. Returns: torch.Tensor: Loss. """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. reduction) loss_cls = self.loss_weight * sigmoid_focal_loss(pred, target, weight, gamma=self.gamma, alpha=self.alpha, reduction=reduction, avg_factor=avg_factor) return loss_cls def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
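A small sketch verifying sigmoid_focal_loss above against its elementwise definition at the default gamma=2.0, alpha=0.25, reduction='mean'; it assumes the function above is in scope.

import torch
import torch.nn.functional as F

torch.manual_seed(0)
x = torch.randn(4, 4, 4, 4)
t = torch.rand(4, 4, 4, 4)
p = x.sigmoid()
pt = (1 - p) * t + p * (1 - t)                      # pt as defined above
manual = (F.binary_cross_entropy_with_logits(x, t, reduction='none')
          * (0.25 * t + 0.75 * (1 - t)) * pt.pow(2.0)).mean()
print(torch.allclose(manual, sigmoid_focal_loss(x, t)))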
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = 0.25 tmp14 = tmp0 * tmp13 tmp15 = 0.75 tmp16 = tmp2 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tl.sigmoid(tmp3) tmp19 = tmp1 - tmp18 tmp20 = tmp19 * tmp0 tmp21 = tmp18 * tmp2 tmp22 = tmp20 + tmp21 tmp23 = tmp22 * tmp22 tmp24 = tmp17 * tmp23 tmp25 = tmp12 * tmp24 tmp26 = tl.broadcast_to(tmp25, [RBLOCK]) tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0)) tmp29 = 256.0 tmp30 = tmp28 / tmp29 tmp31 = tmp30 * tmp1 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp31, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0[ grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def sigmoid_focal_loss(pred, target, weight=None, gamma=2.0, alpha=0.25, reduction='mean', avg_factor=None): """Sigmoid focal loss. Args: pred (torch.Tensor): The prediction with shape (N, \\*). target (torch.Tensor): The ground truth label of the prediction with shape (N, \\*). weight (torch.Tensor, optional): Sample-wise loss weight with shape (N, ).
Defaults to None. gamma (float): The gamma for calculating the modulating factor. Defaults to 2.0. alpha (float): A balanced form for Focal Loss. Defaults to 0.25. reduction (str): The method used to reduce the loss. Options are "none", "mean" and "sum". If reduction is 'none', the loss is the same shape as pred and label. Defaults to 'mean'. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. Returns: torch.Tensor: Loss. """ assert pred.shape == target.shape, 'pred and target should have the same shape.' pred_sigmoid = pred.sigmoid() target = target.type_as(pred) pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma ) loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none' ) * focal_weight if weight is not None: assert weight.dim() == 1 weight = weight.float() if pred.dim() > 1: weight = weight.reshape(-1, 1) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss class FocalLossNew(nn.Module): """Focal loss. Args: gamma (float): Focusing parameter in focal loss. Defaults to 2.0. alpha (float): The parameter in balanced form of focal loss. Defaults to 0.25. reduction (str): The method used to reduce the loss into a scalar. Options are "none" and "mean". Defaults to 'mean'. loss_weight (float): Weight of loss. Defaults to 1.0. """ def __init__(self, gamma=2.0, alpha=0.25, reduction='mean', loss_weight=1.0 ): super(FocalLossNew, self).__init__() self.gamma = gamma self.alpha = alpha self.reduction = reduction self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
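The reduction plumbing above (weight_reduce_loss) supports an avg_factor that replaces the denominator of the elementwise mean. A small sketch of its three paths, assuming the helper above is in scope.

import torch

loss = torch.ones(8)                                # pretend elementwise loss
w = torch.tensor([1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0])

print(weight_reduce_loss(loss, reduction='mean'))                 # tensor(1.)
print(weight_reduce_loss(loss, weight=w, reduction='sum'))        # tensor(4.)
print(weight_reduce_loss(loss, reduction='mean', avg_factor=16))  # tensor(0.5000): sum() / avg_factor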
LiuXiaoxuanPKU/actnn-mmcls
FocalLoss
false
5543
[ "Apache-2.0" ]
1
c97d1116d54ddb3f9b1e51baebe25ffb2b3f7b75
https://github.com/LiuXiaoxuanPKU/actnn-mmcls/tree/c97d1116d54ddb3f9b1e51baebe25ffb2b3f7b75
import torch import torch.nn as nn import torch.nn.functional as F def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def sigmoid_focal_loss(pred, target, weight=None, gamma=2.0, alpha=0.25, reduction='mean', avg_factor=None): """Sigmoid focal loss. Args: pred (torch.Tensor): The prediction with shape (N, \\*). target (torch.Tensor): The ground truth label of the prediction with shape (N, \\*). weight (torch.Tensor, optional): Sample-wise loss weight with shape (N, ). Defaults to None. gamma (float): The gamma for calculating the modulating factor. Defaults to 2.0. alpha (float): A balanced form for Focal Loss. Defaults to 0.25. reduction (str): The method used to reduce the loss. Options are "none", "mean" and "sum". If reduction is 'none', the loss is the same shape as pred and label. Defaults to 'mean'. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. Returns: torch.Tensor: Loss. """ assert pred.shape == target.shape, 'pred and target should have the same shape.' pred_sigmoid = pred.sigmoid() target = target.type_as(pred) pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma ) loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none' ) * focal_weight if weight is not None: assert weight.dim() == 1 weight = weight.float() if pred.dim() > 1: weight = weight.reshape(-1, 1) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss class Model(nn.Module): """Focal loss. Args: gamma (float): Focusing parameter in focal loss. Defaults to 2.0. alpha (float): The parameter in balanced form of focal loss. Defaults to 0.25. reduction (str): The method used to reduce the loss into a scalar. Options are "none" and "mean". Defaults to 'mean'. loss_weight (float): Weight of loss. Defaults to 1.0. """ def __init__(self, gamma=2.0, alpha=0.25, reduction='mean', loss_weight=1.0 ): super().__init__() self.gamma = gamma self.alpha = alpha self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Sigmoid focal loss. Args: pred (torch.Tensor): The prediction with shape (N, \\*). target (torch.Tensor): The ground truth label of the prediction with shape (N, \\*). weight (torch.Tensor, optional): Sample-wise # ... truncated (>4000 chars) for memory efficiency
BasicBlock_IN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/xk/cxka55ttd2soaycbsbmcsfd6abljife3im4atrw25snc4lgpizpp.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.reflection_pad2d] # Source node to ATen node mapping: # x => iota # Graph fragment: # %iota : [num_users=2] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_4/inductor_cache/u3/cu3esk352lfpwhdx5q3cisxtqfmayk6lbllkai24wfn3z3eotxka.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.reflection_pad2d] # Source node to ATen node mapping: # x => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_reflection_pad2d_1 = async_compile.triton('triton_poi_fused_reflection_pad2d_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + x0))) + ((-4)*(tl_math.abs((-3) + x1))) + (16*x2)), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/u2/cu2h7kepp2vussiw5mawdeynnancc7laf3dn7xmj6m66txxk463j.py # Topologically Sorted Source Nodes: [mv, norm, add, truediv], Original ATen: [aten.mv, aten.linalg_vector_norm, aten.add, aten.div] # Source node to ATen node mapping: # add => add # mv => mul, sum_1 # norm => pow_1, pow_2, sum_2 # truediv => div # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute, %primals_2), kwargs = {}) # %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %add : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, 1e-12), kwargs = {}) # %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %add), kwargs = {}) triton_per_fused_add_div_linalg_vector_norm_mv_2 = async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_mv_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_mv_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_mv_2(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.load(in_ptr0 + (4 + r0), None) tmp5 = tl.load(in_ptr1 + (1)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp9 = tl.load(in_ptr0 + (8 + r0), None) tmp10 = tl.load(in_ptr1 + (2)) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp14 = tl.load(in_ptr0 + (12 + r0), None) tmp15 = tl.load(in_ptr1 + (3)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.sum(tmp20, 1)[:, None] tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-12 tmp25 = tmp23 + tmp24 tmp26 = tmp18 / tmp25 tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp26, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/dd/cdd6hg7bodqqsl6rt7hkho7fppp37iuwsdghsonkdm35yx4beecq.py # Topologically Sorted Source Nodes: [mv_1, norm_1, add_1, truediv_1, sigma], Original ATen: [aten.mv, aten.linalg_vector_norm, aten.add, aten.div, aten.dot] # Source node to ATen node mapping: # add_1 => add_1 # mv_1 => mul_1, sum_3 # norm_1 => pow_3, pow_4, sum_4 # sigma => mul_3, sum_6 # truediv_1 => div_1 # Graph fragment: # 
%mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %div), kwargs = {}) # %sum_3 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 2), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, None), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_4, 0.5), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_4, 1e-12), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, %add_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %sum_3), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_3,), kwargs = {}) triton_per_fused_add_div_dot_linalg_vector_norm_mv_3 = async_compile.triton('triton_per_fused_add_div_dot_linalg_vector_norm_mv_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_dot_linalg_vector_norm_mv_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_dot_linalg_vector_norm_mv_3(in_ptr0, in_ptr1, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp9 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (2)) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp14 = tl.load(in_ptr0 + (3 + 
(4*r0)), None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (3)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.sum(tmp20, 1)[:, None] tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-12 tmp25 = tmp23 + tmp24 tmp26 = tmp18 / tmp25 tmp27 = tmp26 * tmp18 tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp30 = tl.sum(tmp28, 1)[:, None] tl.store(out_ptr3 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp26, None) tl.store(out_ptr2 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp30, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/iy/ciyfrn3h65grkri5tse22oq6oz24zqdxpthkaxnywq5nlwwcu426.py # Topologically Sorted Source Nodes: [truediv_2], Original ATen: [aten.div] # Source node to ATen node mapping: # truediv_2 => div_2 # Graph fragment: # %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_4, %expand), kwargs = {}) triton_poi_fused_div_4 = async_compile.triton('triton_poi_fused_div_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 / tmp2 tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/j2/cj25y5kypks7hldzyxgj75p5rupfp4lc5cvxsqotcl5sq3q2acok.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._native_batch_norm_legit] # Source node to ATen node mapping: # x_2 => add_2, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_3, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : 
[num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) triton_per_fused__native_batch_norm_legit_5 = async_compile.triton('triton_per_fused__native_batch_norm_legit_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp21, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/z5/cz5isnfhua7uwsnx7hxjka3axj4urd3jqvilqt34xz47cbtq74ao.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.reflection_pad2d] # Source node to ATen node mapping: # x_4 => _unsafe_index_2, _unsafe_index_3 # Graph fragment: # %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_6, [None, None, %sub_6, None]), kwargs = {}) # %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_6]), kwargs = {}) triton_poi_fused_reflection_pad2d_6 = async_compile.triton('triton_poi_fused_reflection_pad2d_6', ''' import triton import 
triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) % 6 x2 = (xindex // 36) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = tmp4 > tmp5 tmp7 = 0.2 tmp8 = tmp4 * tmp7 tmp9 = tl.where(tmp6, tmp4, tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/7q/c7qakexxwln2kazpcla524ogq3z3pzbf5zjqmylyo6xhvnxv26in.py # Topologically Sorted Source Nodes: [mv_3, norm_2, add_2, truediv_3], Original ATen: [aten.mv, aten.linalg_vector_norm, aten.add, aten.div] # Source node to ATen node mapping: # add_2 => add_3 # mv_3 => mul_6, sum_7 # norm_2 => pow_5, pow_6, sum_8 # truediv_3 => div_3 # Graph fragment: # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_1, %primals_5), kwargs = {}) # %sum_7 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_6, [1]), kwargs = {}) # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_7, 2), kwargs = {}) # %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, None), kwargs = {}) # %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_8, 0.5), kwargs = {}) # %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_6, 1e-12), kwargs = {}) # %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_7, %add_3), kwargs = {}) triton_per_fused_add_div_linalg_vector_norm_mv_7 = async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_mv_7', 
''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_mv_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_mv_7(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 36 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), rmask, other=0.0) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.load(in_ptr0 + (36 + r0), rmask, other=0.0) tmp5 = tl.load(in_ptr1 + (1)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp9 = tl.load(in_ptr0 + (72 + r0), rmask, other=0.0) tmp10 = tl.load(in_ptr1 + (2)) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp14 = tl.load(in_ptr0 + (108 + r0), rmask, other=0.0) tmp15 = tl.load(in_ptr1 + (3)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.where(rmask, tmp20, 0) tmp23 = tl.sum(tmp22, 1)[:, None] tmp24 = libdevice.sqrt(tmp23) tmp25 = 1e-12 tmp26 = tmp24 + tmp25 tmp27 = tmp18 / tmp26 tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp18, rmask) tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp26, None) tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp27, rmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/cg/ccgxk6hsvkfuxxj43enqcdea57y4x5rki37xnq6pkpids7y5n7rw.py # Topologically Sorted Source Nodes: [truediv_3, mv_4], Original ATen: [aten.div, aten.mv] # Source node to ATen node mapping: # mv_4 => mul_7, sum_9 # truediv_3 => div_3 # Graph fragment: # %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_7, %add_3), kwargs = {}) # %mul_7 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%view_8, %div_3), kwargs = {}) # %sum_9 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_7, [1]), kwargs = {}) triton_per_fused_div_mv_8 = async_compile.triton('triton_per_fused_div_mv_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mv_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_mv_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 36 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (36*x0)), rmask & xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1), rmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr2 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4 = tmp1 / tmp3 tmp5 = tmp0 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(rmask & xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/xy/cxyvlheoomg3phbyiazvdpun3iy3baokavqqz6edie2evohlyp4h.py # Topologically Sorted Source Nodes: [norm_3, add_3, truediv_4], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div] # Source node to ATen node mapping: # add_3 => add_4 # norm_3 => pow_7, pow_8, sum_10 # truediv_4 => div_4 # Graph fragment: # %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_9, 2), kwargs = {}) # %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_7, None), kwargs = {}) # %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_10, 0.5), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_8, 1e-12), kwargs = {}) # %div_4 : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_9, %add_4), kwargs = {}) triton_per_fused_add_div_linalg_vector_norm_9 = 
async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_9(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp6 = 1e-12 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp8, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/rr/crr6tv3ypuiw25pt55zuvsw3hjc35kiyz2jbsaoih3lleqjyzlaw.py # Topologically Sorted Source Nodes: [sigma_1], Original ATen: [aten.dot] # Source node to ATen node mapping: # sigma_1 => mul_9, sum_12 # Graph fragment: # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_4, %sum_9), kwargs = {}) # %sum_12 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%mul_9,), kwargs = {}) triton_per_fused_dot_10 = async_compile.triton('triton_per_fused_dot_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_dot_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_dot_10(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/6k/c6k5avjbseb37trg5jk5ln36xmokh6sfxm5ccsm3zpxtlrhgun64.py # Topologically Sorted Source Nodes: [truediv_5], Original ATen: [aten.div] # Source node to ATen node mapping: # truediv_5 => div_5 # Graph fragment: # %div_5 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_7, %expand_1), kwargs = {}) triton_poi_fused_div_11 = async_compile.triton('triton_poi_fused_div_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_11(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 / tmp2 
tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/5d/c5ddtjdheky6atm26rexxirggj3mn2xfjk6rvqazcv7mhetauk5r.py # Topologically Sorted Source Nodes: [x_6, out, out_1], Original ATen: [aten._native_batch_norm_legit, aten.add, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out => add_6 # out_1 => relu # x_6 => add_5, rsqrt_1, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_11, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_5,), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_14, %primals_1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_6,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_per_fused__native_batch_norm_legit_add_relu_threshold_backward_12 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_relu_threshold_backward_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_relu_threshold_backward_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_add_relu_threshold_backward_12(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp29 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = 
tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 * tmp21 tmp24 = 0.0 tmp25 = tmp23 > tmp24 tmp26 = 0.2 tmp27 = tmp23 * tmp26 tmp28 = tl.where(tmp25, tmp23, tmp27) tmp30 = tmp28 + tmp29 tmp31 = tl.full([1, 1], 0, tl.int32) tmp32 = triton_helpers.maximum(tmp31, tmp30) tmp33 = tmp32 <= tmp24 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp21, xmask) tl.store(out_ptr1 + (r1 + (16*x0)), tmp32, xmask) tl.store(out_ptr2 + (r1 + (16*x0)), tmp33, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (36, ), (1, )) assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.reflection_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_reflection_pad2d_0.run(buf0, 4, grid=grid(4), stream=stream0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.reflection_pad2d] triton_poi_fused_reflection_pad2d_1.run(primals_1, buf1, 256, grid=grid(256), stream=stream0) buf4 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mv, norm, add, truediv], Original ATen: [aten.mv, aten.linalg_vector_norm, aten.add, aten.div] triton_per_fused_add_div_linalg_vector_norm_mv_2.run(primals_4, primals_2, buf4, 1, 4, grid=grid(1), stream=stream0) buf7 = empty_strided_cuda((), (), torch.float32) buf30 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mv_1, norm_1, add_1, truediv_1, sigma], Original ATen: [aten.mv, aten.linalg_vector_norm, aten.add, aten.div, aten.dot] triton_per_fused_add_div_dot_linalg_vector_norm_mv_3.run(primals_4, buf4, buf7, buf30, 1, 4, grid=grid(1), stream=stream0) buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [truediv_2], Original ATen: [aten.div] triton_poi_fused_div_4.run(primals_4, buf7, buf8, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf9 = extern_kernels.convolution(buf1, buf8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 4, 4), (64, 16, 4, 1)) buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf11 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf13 = reinterpret_tensor(buf11, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._native_batch_norm_legit] 
triton_per_fused__native_batch_norm_legit_5.run(buf13, buf9, buf10, 16, 16, grid=grid(16), stream=stream0) buf14 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.reflection_pad2d] triton_poi_fused_reflection_pad2d_6.run(buf9, buf10, buf13, buf14, 576, grid=grid(576), stream=stream0) buf15 = empty_strided_cuda((36, ), (1, ), torch.float32) buf16 = buf7; del buf7 # reuse buf17 = buf16; del buf16 # reuse buf44 = empty_strided_cuda((36, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mv_3, norm_2, add_2, truediv_3], Original ATen: [aten.mv, aten.linalg_vector_norm, aten.add, aten.div] triton_per_fused_add_div_linalg_vector_norm_mv_7.run(buf17, primals_7, primals_5, buf15, buf44, 1, 36, grid=grid(1), stream=stream0) buf18 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [truediv_3, mv_4], Original ATen: [aten.div, aten.mv] triton_per_fused_div_mv_8.run(primals_7, buf15, buf17, buf18, 4, 36, grid=grid(4), stream=stream0) buf20 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [norm_3, add_3, truediv_4], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div] triton_per_fused_add_div_linalg_vector_norm_9.run(buf18, buf20, 1, 4, grid=grid(1), stream=stream0) buf21 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [sigma_1], Original ATen: [aten.dot] triton_per_fused_dot_10.run(buf20, buf18, buf21, 1, 4, grid=grid(1), stream=stream0) del buf18 buf22 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [truediv_5], Original ATen: [aten.div] triton_poi_fused_div_11.run(primals_7, buf21, buf22, 144, grid=grid(144), stream=stream0) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] buf23 = extern_kernels.convolution(buf14, buf22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 4, 4, 4), (64, 16, 4, 1)) buf24 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf25 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf27 = reinterpret_tensor(buf25, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf25 # reuse buf28 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf29 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_6, out, out_1], Original ATen: [aten._native_batch_norm_legit, aten.add, aten.relu, aten.threshold_backward] triton_per_fused__native_batch_norm_legit_add_relu_threshold_backward_12.run(buf27, buf23, primals_1, buf24, buf28, buf29, 16, 16, grid=grid(16), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [norm_1, add_1, truediv_1], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div] buf31 = torch.ops.aten.set_.source_Tensor(primals_2, buf30) assert_size_stride(buf31, (4, ), (1, )) del primals_2 # Topologically Sorted Source Nodes: [], Original ATen: [] buf35 = torch.ops.aten.set_.source_Tensor(primals_3, buf4) assert_size_stride(buf35, (4, ), (1, )) del primals_3 # Topologically Sorted Source Nodes: [], Original ATen: [] buf39 = torch.ops.aten.set_.source_Tensor(primals_5, buf20) assert_size_stride(buf39, (4, ), (1, )) del buf15 # Topologically Sorted Source Nodes: [truediv_3], Original ATen: [aten.div] buf45 = torch.ops.aten.set_.source_Tensor(primals_6, buf44) assert_size_stride(buf45, (36, ), (1, )) 
del primals_6 return (buf28, buf8, buf22, primals_4, primals_5, primals_7, buf0, buf1, buf4, buf8, buf9, buf10, buf13, buf14, buf17, buf20, buf21, buf22, buf23, buf24, buf27, buf29, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((36, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
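The fused reflection-pad kernel above (triton_poi_fused_reflection_pad2d_6) hard-codes its gather as a single flat offset. A minimal sketch (hypothetical check code, not part of the generated module) showing that 15 - |(-3 + |x0 - 1|)| - 4*|(-3 + |x1 - 1|)| + 16*x2 is exactly row-major indexing for nn.ReflectionPad2d(1) on a 4x4 feature map:

import torch

src = torch.arange(16.0).view(1, 1, 4, 4)
ref = torch.nn.ReflectionPad2d(1)(src)[0, 0]
out = torch.empty(6, 6)
for x1 in range(6):      # padded row, as in the kernel's x1 = (xindex // 6) % 6
    for x0 in range(6):  # padded col, x0 = xindex % 6
        a0 = abs(-3 + abs(-1 + x0))
        a1 = abs(-3 + abs(-1 + x1))
        # flat source index 15 - a0 - 4*a1 == 4*(3 - a1) + (3 - a0)
        out[x1, x0] = src.view(-1)[15 - a0 - 4 * a1]
assert torch.equal(out, ref)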
import torch import torch.nn as nn from torch.nn import Parameter def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) self.beta = Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class Conv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', norm= 'none', sn=True): super(Conv2dLayer, self).__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) if norm == 'bn': self.norm = nn.BatchNorm2d(out_channels) elif norm == 'in': self.norm = nn.InstanceNorm2d(out_channels) elif norm == 'ln': self.norm = LayerNorm(out_channels) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigmoid() elif activation == 'none': 
self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation, bias=False)) else: self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation, bias=False) def forward(self, x): x = self.pad(x) x = self.conv2d(x) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x class BasicBlock_IN(nn.Module): def __init__(self, inplanes, planes, stride=1, first=False): super(BasicBlock_IN, self).__init__() self.expansion = 4 self.stride = stride self.first = first self.conv1 = Conv2dLayer(inplanes, planes, 1, stride, 0, pad_type= 'reflect', activation='lrelu', norm='in') self.conv2 = Conv2dLayer(planes, planes, 3, 1, 1, pad_type= 'reflect', activation='lrelu', norm='in') if stride != 1 or first is True: self.downsample = Conv2dLayer(inplanes, planes * self.expansion, 1, stride, 0, pad_type='reflect', activation='lrelu', norm='bn' ) self.relu = nn.ReLU(inplace=True) def forward(self, x): identity = x out = self.conv1(x) out = self.conv2(out) if self.stride != 1 or self.first is True: identity = self.downsample(x) out = out + identity out = self.relu(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inplanes': 4, 'planes': 4}]
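SpectralNorm._update_u_v above performs one step of power iteration on the weight viewed as (height, -1), which is what the fused mv/norm/div Triton kernels in this record reproduce. A small sanity sketch (hypothetical; it runs more iterations than the module's default of 1 so the estimate has time to converge) comparing the resulting sigma against the true top singular value:

import torch

torch.manual_seed(0)
w = torch.randn(4, 36)        # e.g. a (4, 4, 3, 3) conv weight flattened
u = torch.randn(4)
u = u / (u.norm() + 1e-12)
for _ in range(50):
    v = torch.mv(w.t(), u)
    v = v / (v.norm() + 1e-12)
    u = torch.mv(w, v)
    u = u / (u.norm() + 1e-12)
sigma = u.dot(torch.mv(w, v))
print(float(sigma), float(torch.linalg.svdvals(w)[0]))  # nearly equal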
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_reflection_pad2d_0(out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_reflection_pad2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math .abs(-3 + x1) + 16 * x2), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_mv_2(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.load(in_ptr0 + (4 + r0), None) tmp5 = tl.load(in_ptr1 + 1) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp9 = tl.load(in_ptr0 + (8 + r0), None) tmp10 = tl.load(in_ptr1 + 2) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp14 = tl.load(in_ptr0 + (12 + r0), None) tmp15 = tl.load(in_ptr1 + 3) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.sum(tmp20, 1)[:, None] tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-12 tmp25 = tmp23 + tmp24 tmp26 = tmp18 / tmp25 tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp26, None) @triton.jit def triton_per_fused_add_div_dot_linalg_vector_norm_mv_3(in_ptr0, in_ptr1, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 1) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp9 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + 2) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp14 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + 3) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp3 = tmp0 * tmp2 tmp7 
= tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.sum(tmp20, 1)[:, None] tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-12 tmp25 = tmp23 + tmp24 tmp26 = tmp18 / tmp25 tmp27 = tmp26 * tmp18 tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp30 = tl.sum(tmp28, 1)[:, None] tl.store(out_ptr3 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp26, None) tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp30, None) @triton.jit def triton_poi_fused_div_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 / tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_poi_fused_reflection_pad2d_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = tmp4 > tmp5 tmp7 = 0.2 tmp8 = tmp4 * tmp7 tmp9 = tl.where(tmp6, tmp4, tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_mv_7(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): rnumel = 36 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + r0, rmask, other=0.0) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.load(in_ptr0 + (36 + r0), rmask, other=0.0) tmp5 = tl.load(in_ptr1 + 1) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp9 = tl.load(in_ptr0 + (72 + r0), rmask, other=0.0) tmp10 = tl.load(in_ptr1 + 2) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, 
RBLOCK]) tmp14 = tl.load(in_ptr0 + (108 + r0), rmask, other=0.0) tmp15 = tl.load(in_ptr1 + 3) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.where(rmask, tmp20, 0) tmp23 = tl.sum(tmp22, 1)[:, None] tmp24 = libdevice.sqrt(tmp23) tmp25 = 1e-12 tmp26 = tmp24 + tmp25 tmp27 = tmp18 / tmp26 tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp18, rmask) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None) tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp27, rmask) @triton.jit def triton_per_fused_div_mv_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 36 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 36 * x0), rmask & xmask, other=0.0) tmp1 = tl.load(in_ptr1 + r1, rmask, eviction_policy='evict_last', other=0.0 ) tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4 = tmp1 / tmp3 tmp5 = tmp0 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(rmask & xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_9(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp6 = 1e-12 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp8, None) @triton.jit def triton_per_fused_dot_10(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) @triton.jit def triton_poi_fused_div_11(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 / tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_add_relu_threshold_backward_12( in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp29 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 * tmp21 tmp24 = 0.0 tmp25 = tmp23 > tmp24 tmp26 = 0.2 tmp27 = tmp23 * tmp26 tmp28 = tl.where(tmp25, tmp23, tmp27) tmp30 = tmp28 + tmp29 tmp31 = tl.full([1, 1], 0, tl.int32) tmp32 = triton_helpers.maximum(tmp31, tmp30) tmp33 = tmp32 <= tmp24 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr1 + (r1 + 16 * x0), tmp32, xmask) tl.store(out_ptr2 + (r1 + 16 * x0), tmp33, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (36,), (1,)) assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.int64) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(4)](buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_reflection_pad2d_1[grid(256)](primals_1, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_add_div_linalg_vector_norm_mv_2[grid(1)](primals_4, primals_2, buf4, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf7 = empty_strided_cuda((), (), torch.float32) buf30 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_add_div_dot_linalg_vector_norm_mv_3[grid(1)](primals_4 , buf4, buf7, buf30, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_div_4[grid(16)](primals_4, buf7, buf8, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf9 = extern_kernels.convolution(buf1, buf8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 4, 4), (64, 16, 4, 1)) buf10 = empty_strided_cuda((1, 
16, 1, 1), (16, 1, 1, 1), torch.float32) buf11 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) buf13 = reinterpret_tensor(buf11, (1, 16, 1, 1), (16, 1, 1, 1), 0) del buf11 triton_per_fused__native_batch_norm_legit_5[grid(16)](buf13, buf9, buf10, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf14 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32 ) triton_poi_fused_reflection_pad2d_6[grid(576)](buf9, buf10, buf13, buf14, 576, XBLOCK=128, num_warps=4, num_stages=1) buf15 = empty_strided_cuda((36,), (1,), torch.float32) buf16 = buf7 del buf7 buf17 = buf16 del buf16 buf44 = empty_strided_cuda((36,), (1,), torch.float32) triton_per_fused_add_div_linalg_vector_norm_mv_7[grid(1)](buf17, primals_7, primals_5, buf15, buf44, 1, 36, XBLOCK=1, num_warps= 2, num_stages=1) buf18 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_div_mv_8[grid(4)](primals_7, buf15, buf17, buf18, 4, 36, XBLOCK=1, num_warps=2, num_stages=1) buf20 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_add_div_linalg_vector_norm_9[grid(1)](buf18, buf20, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf21 = empty_strided_cuda((), (), torch.float32) triton_per_fused_dot_10[grid(1)](buf20, buf18, buf21, 1, 4, XBLOCK= 1, num_warps=2, num_stages=1) del buf18 buf22 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) triton_poi_fused_div_11[grid(144)](primals_7, buf21, buf22, 144, XBLOCK=128, num_warps=4, num_stages=1) buf23 = extern_kernels.convolution(buf14, buf22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 4, 4, 4), (64, 16, 4, 1)) buf24 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf25 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. 
float32) buf27 = reinterpret_tensor(buf25, (1, 16, 1, 1), (16, 1, 1, 1), 0) del buf25 buf28 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf29 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_per_fused__native_batch_norm_legit_add_relu_threshold_backward_12[ grid(16)](buf27, buf23, primals_1, buf24, buf28, buf29, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_1 buf31 = torch.ops.aten.set_.source_Tensor(primals_2, buf30) assert_size_stride(buf31, (4,), (1,)) del primals_2 buf35 = torch.ops.aten.set_.source_Tensor(primals_3, buf4) assert_size_stride(buf35, (4,), (1,)) del primals_3 buf39 = torch.ops.aten.set_.source_Tensor(primals_5, buf20) assert_size_stride(buf39, (4,), (1,)) del buf15 buf45 = torch.ops.aten.set_.source_Tensor(primals_6, buf44) assert_size_stride(buf45, (36,), (1,)) del primals_6 return (buf28, buf8, buf22, primals_4, primals_5, primals_7, buf0, buf1, buf4, buf8, buf9, buf10, buf13, buf14, buf17, buf20, buf21, buf22, buf23, buf24, buf27, buf29) def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) self.beta = Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super(SpectralNorm, self).__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class Conv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', norm= 'none', sn=True): super(Conv2dLayer, 
self).__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) if norm == 'bn': self.norm = nn.BatchNorm2d(out_channels) elif norm == 'in': self.norm = nn.InstanceNorm2d(out_channels) elif norm == 'ln': self.norm = LayerNorm(out_channels) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'sigmoid': self.activation = nn.Sigmoid() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if sn: self.conv2d = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation, bias=False)) else: self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation, bias=False) def forward(self, x): x = self.pad(x) x = self.conv2d(x) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x class BasicBlock_INNew(nn.Module): def __init__(self, inplanes, planes, stride=1, first=False): super(BasicBlock_INNew, self).__init__() self.expansion = 4 self.stride = stride self.first = first self.conv1 = Conv2dLayer(inplanes, planes, 1, stride, 0, pad_type= 'reflect', activation='lrelu', norm='in') self.conv2 = Conv2dLayer(planes, planes, 3, 1, 1, pad_type= 'reflect', activation='lrelu', norm='in') if stride != 1 or first is True: self.downsample = Conv2dLayer(inplanes, planes * self.expansion, 1, stride, 0, pad_type='reflect', activation='lrelu', norm='bn' ) self.relu = nn.ReLU(inplace=True) def forward(self, input_0): primals_2 = self.conv1.conv2d.module.weight_u primals_3 = self.conv1.conv2d.module.weight_v primals_4 = self.conv1.conv2d.module.weight_bar primals_5 = self.conv2.conv2d.module.weight_u primals_6 = self.conv2.conv2d.module.weight_v primals_7 = self.conv2.conv2d.module.weight_bar primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
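A hypothetical smoke test for the compiled wrapper above (requires a CUDA device, and only the traced (4, 4, 4, 4) input shape is guarded for by the assert_size_stride checks in call()):

import torch

block = BasicBlock_INNew(inplanes=4, planes=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
y = block(x)            # dispatches to call() and the Triton kernels above
assert y.shape == (4, 4, 4, 4)
assert (y >= 0).all()   # the block ends in a ReLU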
LenKerr/Semantic-Colorization-GAN
BasicBlock_IN
false
5544
[ "MIT" ]
1
2ce52406ca6fc92e69692b451b1c9ae66ba3b76f
https://github.com/LenKerr/Semantic-Colorization-GAN/tree/2ce52406ca6fc92e69692b451b1c9ae66ba3b76f
import torch import torch.nn as nn from torch.nn import Parameter def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-08, affine=True): super().__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = Parameter(torch.Tensor(num_features).uniform_()) self.beta = Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class SpectralNorm(nn.Module): def __init__(self, module, name='weight', power_iterations=1): super().__init__() self.module = module self.name = name self.power_iterations = power_iterations if not self._made_params(): self._make_params() def _update_u_v(self): u = getattr(self.module, self.name + '_u') v = getattr(self.module, self.name + '_v') w = getattr(self.module, self.name + '_bar') height = w.data.shape[0] for _ in range(self.power_iterations): v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data)) u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) sigma = u.dot(w.view(height, -1).mv(v)) setattr(self.module, self.name, w / sigma.expand_as(w)) def _made_params(self): try: getattr(self.module, self.name + '_u') getattr(self.module, self.name + '_v') getattr(self.module, self.name + '_bar') return True except AttributeError: return False def _make_params(self): w = getattr(self.module, self.name) height = w.data.shape[0] width = w.view(height, -1).data.shape[1] u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) u.data = l2normalize(u.data) v.data = l2normalize(v.data) w_bar = Parameter(w.data) del self.module._parameters[self.name] self.module.register_parameter(self.name + '_u', u) self.module.register_parameter(self.name + '_v', v) self.module.register_parameter(self.name + '_bar', w_bar) def forward(self, *args): self._update_u_v() return self.module.forward(*args) class Conv2dLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, pad_type='zero', activation='lrelu', norm= 'none', sn=True): super().__init__() if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) if norm == 'bn': self.norm = nn.BatchNorm2d(out_channels) elif norm == 'in': self.norm = nn.InstanceNorm2d(out_channels) elif norm == 'ln': self.norm = LayerNorm(out_channels) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.acti # ... truncated (>4000 chars) for memory efficiency
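The eps in l2normalize (repeated in the truncated source above) only matters for degenerate inputs; a short sketch of the behavior:

import torch

def l2normalize(v, eps=1e-12):
    return v / (v.norm() + eps)

print(l2normalize(torch.tensor([3.0, 4.0])))  # tensor([0.6000, 0.8000])
print(l2normalize(torch.zeros(2)))            # tensor([0., 0.]); eps avoids 0/0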
Downsample
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/cu/ccutvo2v4333pq6xhrg2zryqqwthm7dmmuqprvva2xdwiodpz5jn.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   conv2d => convolution
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 4) % 4
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
        stream0 = get_raw_stream(0)
        triton_poi_fused_convolution_0.run(buf1, primals_3, 64, grid=grid(64), stream=stream0)
        del primals_3
    return (buf1, primals_1, primals_2, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
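The compiled graph splits the biased convolution in two: extern_kernels.convolution runs the conv with bias=None, and the small Triton kernel then adds the per-channel bias in place over the (4, 4, 2, 2) output. A minimal eager-mode sketch of the same split (illustrative only; x, w, b are placeholder names):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
w = torch.rand(4, 4, 3, 3)
b = torch.rand(4)
buf = F.conv2d(x, w, bias=None, stride=2, padding=1)  # what extern_kernels.convolution computes
buf += b.view(1, -1, 1, 1)                            # what triton_poi_fused_convolution_0 adds in place
assert torch.allclose(buf, F.conv2d(x, w, b, stride=2, padding=1))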
import torch
import torch.nn as nn

def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.
    """
    if dims == 1:
        return nn.Conv1d(*args, **kwargs)
    elif dims == 2:
        return nn.Conv2d(*args, **kwargs)
    elif dims == 3:
        return nn.Conv3d(*args, **kwargs)
    raise ValueError(f'unsupported dimensions: {dims}')

def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.
    """
    if dims == 1:
        return nn.AvgPool1d(*args, **kwargs)
    elif dims == 2:
        return nn.AvgPool2d(*args, **kwargs)
    elif dims == 3:
        return nn.AvgPool3d(*args, **kwargs)
    raise ValueError(f'unsupported dimensions: {dims}')

class Downsample(nn.Module):
    """
    A downsampling layer with an optional convolution.

    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 downsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        stride = 2 if dims != 3 else (1, 2, 2)
        if use_conv:
            self.op = conv_nd(dims, self.channels, self.out_channels, 3, stride=stride, padding=1)
        else:
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {'channels': 4, 'use_conv': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride

@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)

def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(64)](buf1, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_3
    return buf1, primals_1, primals_2

def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.
    """
    if dims == 1:
        return nn.Conv1d(*args, **kwargs)
    elif dims == 2:
        return nn.Conv2d(*args, **kwargs)
    elif dims == 3:
        return nn.Conv3d(*args, **kwargs)
    raise ValueError(f'unsupported dimensions: {dims}')

def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.
    """
    if dims == 1:
        return nn.AvgPool1d(*args, **kwargs)
    elif dims == 2:
        return nn.AvgPool2d(*args, **kwargs)
    elif dims == 3:
        return nn.AvgPool3d(*args, **kwargs)
    raise ValueError(f'unsupported dimensions: {dims}')

class DownsampleNew(nn.Module):
    """
    A downsampling layer with an optional convolution.

    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 downsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        stride = 2 if dims != 3 else (1, 2, 2)
        if use_conv:
            self.op = conv_nd(dims, self.channels, self.out_channels, 3, stride=stride, padding=1)
        else:
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)

    def forward(self, input_0):
        primals_2 = self.op.weight
        primals_3 = self.op.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
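To sanity-check the lowering, the compiled DownsampleNew can be compared against the eager Downsample on shared weights. A rough parity harness, assuming both class definitions above are in scope and a CUDA device is available (call() pins device 0):

import torch

eager = Downsample(channels=4, use_conv=True).cuda()
compiled = DownsampleNew(channels=4, use_conv=True).cuda()
compiled.load_state_dict(eager.state_dict())  # both register the conv as self.op
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(compiled(x), eager(x))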
Liujingxiu23/guided-diffusion
Downsample
false
5545
[ "MIT" ]
1
0ba878e517b276c45d1195eb29f6f5f72659a05b
https://github.com/Liujingxiu23/guided-diffusion/tree/0ba878e517b276c45d1195eb29f6f5f72659a05b
import torch
import torch.nn as nn

def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.
    """
    if dims == 1:
        return nn.Conv1d(*args, **kwargs)
    elif dims == 2:
        return nn.Conv2d(*args, **kwargs)
    elif dims == 3:
        return nn.Conv3d(*args, **kwargs)
    raise ValueError(f'unsupported dimensions: {dims}')

def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.
    """
    if dims == 1:
        return nn.AvgPool1d(*args, **kwargs)
    elif dims == 2:
        return nn.AvgPool2d(*args, **kwargs)
    elif dims == 3:
        return nn.AvgPool3d(*args, **kwargs)
    raise ValueError(f'unsupported dimensions: {dims}')

class Model(nn.Module):
    """
    A downsampling layer with an optional convolution.

    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 downsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        stride = 2 if dims != 3 else (1, 2, 2)
        if use_conv:
            self.op = conv_nd(dims, self.channels, self.out_channels, 3, stride=stride, padding=1)
        else:
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [4, 4]
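Note the stride logic, stride = 2 if dims != 3 else (1, 2, 2): in the 3D case only the inner two dimensions are halved, exactly as the docstring says. A quick shape check (illustrative sizes):

import torch

down3d = Model(channels=4, use_conv=True, dims=3)
x = torch.rand(2, 4, 6, 8, 8)   # (N, C, D, H, W)
print(down3d(x).shape)          # torch.Size([2, 4, 6, 4, 4]) -- D kept, H and W halved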
AdditiveAttention
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/vu/cvuv73ijvl45rys2kvk2dud7shg4nznn622fzyldpf7pmxppx3o5.py
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
#   tanh => tanh
# Graph fragment:
#   %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_4/inductor_cache/dm/cdmkcxuzpnailvibeivaikqdr4zvashgzwju7qijhq5aizlo3aor.py
# Topologically Sorted Source Nodes: [att_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   att_weights => amax, clone, exp, sub
# Graph fragment:
#   %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format})
#   %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [-1], True), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {})
#   %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 4
    x2 = (xindex // 16)
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_4/inductor_cache/ot/cotghubvxq6i3kdxijgdhem4ntfmdc5cn3al7gv2kuqi2pbqiyxe.py
# Topologically Sorted Source Nodes: [att_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   att_weights => div, sum_1
# Graph fragment:
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
#   %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16, 4],
    tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = (yindex // 4)
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (y0 + (16*y1)), ymask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + y0 + (16*y1)), ymask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + y0 + (16*y1)), ymask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + y0 + (16*y1)), ymask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + (x2 + (4*y3)), tmp8, xmask & ymask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0  # reuse
        # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
        stream0 = get_raw_stream(0)
        triton_poi_fused_tanh_0.run(buf1, primals_2, 64, grid=grid(64), stream=stream0)
        del primals_2
        buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [add_score], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
        buf3 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
        # Topologically Sorted Source Nodes: [att_weights], Original ATen: [aten._softmax]
        triton_poi_fused__softmax_1.run(buf2, buf3, 64, grid=grid(64), stream=stream0)
        buf4 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0); del buf2  # reuse
        # Topologically Sorted Source Nodes: [att_weights], Original ATen: [aten._softmax]
        triton_poi_fused__softmax_2.run(buf3, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
        buf5 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0); del buf3  # reuse
        # Topologically Sorted Source Nodes: [att_out], Original ATen: [aten.bmm]
        extern_kernels.bmm(buf4, primals_3, out=buf5)
    return (buf5, primals_3, buf1, buf4, primals_4, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
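The two softmax kernels implement the usual numerically stable two-pass softmax over the transposed scores: triton_poi_fused__softmax_1 subtracts the per-row max and exponentiates, and triton_poi_fused__softmax_2 divides by the per-row sum. The equivalent eager computation, for reference:

import torch

s = torch.rand(4, 4, 4)                   # add_score after transpose(1, 2)
m = s.amax(dim=-1, keepdim=True)          # kernel 1: amax
e = (s - m).exp()                         # kernel 1: sub + exp
att = e / e.sum(dim=-1, keepdim=True)     # kernel 2: sum + div
torch.testing.assert_close(att, torch.softmax(s, dim=-1))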
import torch
import torch.nn as nn

class AdditiveAttention(nn.Module):

    def __init__(self, in_features, att_hidden, out_features, bias=True):
        super(AdditiveAttention, self).__init__()
        self.out_size = out_features
        self.linear1 = nn.Linear(in_features=in_features, out_features=att_hidden, bias=bias)
        self.tanh = nn.Tanh()
        self.linear2 = nn.Linear(in_features=att_hidden, out_features=out_features, bias=False)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, inputs, mask=None):
        """
        :param inputs: (bz, seq_len, in_features)
        :param mask: (bz, seq_len), padded positions are 0
        :return:
        """
        add_score = self.linear2(self.tanh(self.linear1(inputs)))
        add_score = add_score.transpose(1, 2)
        if mask is not None:
            pad_mask = mask == 0
            add_score = add_score.masked_fill(pad_mask[:, None, :], -1000000000.0)
        att_weights = self.softmax(add_score)
        att_out = torch.bmm(att_weights, inputs)
        return att_out

def get_inputs():
    return [torch.rand([4, 4, 4])]

def get_init_inputs():
    return [[], {'in_features': 4, 'att_hidden': 4, 'out_features': 4}]
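Usage with a padding mask, following the docstring's convention that padded positions are 0 (illustrative shapes, not part of the record):

import torch

att = AdditiveAttention(in_features=4, att_hidden=4, out_features=4)
x = torch.rand(2, 5, 4)                    # (bz, seq_len, in_features)
mask = torch.tensor([[1, 1, 1, 0, 0],
                     [1, 1, 1, 1, 1]])     # last two positions of row 0 are padding
out = att(x, mask)                         # (bz, out_features, in_features) -> (2, 4, 4)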
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)

@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 4
    x2 = xindex // 16
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x3, tmp9, xmask)

@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (y0 + 16 * y1), ymask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + y0 + 16 * y1), ymask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + y0 + 16 * y1), ymask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + y0 + 16 * y1), ymask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp8, xmask & ymask)

def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_tanh_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
        buf3 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
        triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf4 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0)
        del buf2
        triton_poi_fused__softmax_2[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        buf5 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0)
        del buf3
        extern_kernels.bmm(buf4, primals_3, out=buf5)
    return buf5, primals_3, buf1, buf4, primals_4

class AdditiveAttentionNew(nn.Module):

    def __init__(self, in_features, att_hidden, out_features, bias=True):
        super(AdditiveAttentionNew, self).__init__()
        self.out_size = out_features
        self.linear1 = nn.Linear(in_features=in_features, out_features=att_hidden, bias=bias)
        self.tanh = nn.Tanh()
        self.linear2 = nn.Linear(in_features=att_hidden, out_features=out_features, bias=False)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, input_0):
        primals_1 = self.linear1.weight
        primals_2 = self.linear1.bias
        primals_4 = self.linear2.weight
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
LindgeW/DomainAdaption4DependencyParsing
AdditiveAttention
false
5546
[ "Apache-2.0" ]
1
5de136a37d8fe730e4235ed95bf923763fe21ea6
https://github.com/LindgeW/DomainAdaption4DependencyParsing/tree/5de136a37d8fe730e4235ed95bf923763fe21ea6
import torch
import torch.nn as nn

class Model(nn.Module):

    def __init__(self, in_features, att_hidden, out_features, bias=True):
        super().__init__()
        self.out_size = out_features
        self.linear1 = nn.Linear(in_features=in_features, out_features=att_hidden, bias=bias)
        self.tanh = nn.Tanh()
        self.linear2 = nn.Linear(in_features=att_hidden, out_features=out_features, bias=False)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, inputs, mask=None):
        """
        :param inputs: (bz, seq_len, in_features)
        :param mask: (bz, seq_len), padded positions are 0
        :return:
        """
        add_score = self.linear2(self.tanh(self.linear1(inputs)))
        add_score = add_score.transpose(1, 2)
        if mask is not None:
            pad_mask = mask == 0
            add_score = add_score.masked_fill(pad_mask[:, None, :], -1000000000.0)
        att_weights = self.softmax(add_score)
        att_out = torch.bmm(att_weights, inputs)
        return att_out

def get_inputs():
    return [torch.rand([4, 4, 4])]

def get_init_inputs():
    return [4, 4, 4]
myConv2d
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/sr/csrhhqsexdcor6gq6tz4dawxblhadgekinzxxkt33uwojltligp6.py
# Topologically Sorted Source Nodes: [autograd_function_apply], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   autograd_function_apply => convolution
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [autograd_function_apply], Original ATen: [aten.convolution]
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
        del primals_1
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [autograd_function_apply], Original ATen: [aten.convolution]
        stream0 = get_raw_stream(0)
        triton_poi_fused_convolution_0.run(buf1, primals_2, 16, grid=grid(16), stream=stream0)
        del primals_2
    return (buf1, reinterpret_tensor(primals_3, (4, 4, 4, 4), (16, 64, 4, 1), 0), )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.nn as nn
import torch.nn.functional as F

class myConv2dFunction(torch.autograd.Function):

    @staticmethod
    def forward(ctx, input, weight, bias):
        ctx.save_for_backward(input, weight, bias)
        return F.conv2d(input, weight, bias)

    @staticmethod
    def backward(ctx, grad_output):
        input, weight, _bias = ctx.saved_tensors
        _out_channels, _in_channels, kernel_height, kernel_width = list(weight.size())
        grad_input = F.conv2d(grad_output, torch.Tensor.rot90(weight, 2, [2, 3]).transpose(0, 1), padding=(kernel_width - 1, kernel_height - 1))
        grad_weight = F.conv2d(input.transpose(0, 1), grad_output.transpose(0, 1)).transpose(0, 1)
        grad_bias = grad_output.sum([0, 2, 3])
        return grad_input, grad_weight, grad_bias

class myConv2d(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size):
        super(myConv2d, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        sqrtk = math.sqrt(1 / (in_channels * kernel_size[0] * kernel_size[1]))
        self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels, kernel_size[0], kernel_size[1]))
        self.weight.data.uniform_(-sqrtk, sqrtk)
        self.bias = nn.Parameter(torch.Tensor(out_channels))
        self.bias.data.uniform_(-sqrtk, sqrtk)

    def forward(self, input):
        return myConv2dFunction.apply(input, self.weight, self.bias)

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': [4, 4]}]
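The hand-written backward can be checked against numeric gradients with torch.autograd.gradcheck in double precision. One caveat: grad_input passes padding=(kernel_width - 1, kernel_height - 1) where F.conv2d expects (padH, padW), so the formula only lines up for square kernels; the sketch below (illustrative shapes) sticks to one:

import torch

x = torch.randn(1, 2, 5, 5, dtype=torch.double, requires_grad=True)
w = torch.randn(3, 2, 3, 3, dtype=torch.double, requires_grad=True)  # square 3x3 kernel
b = torch.randn(3, dtype=torch.double, requires_grad=True)
torch.autograd.gradcheck(myConv2dFunction.apply, (x, w, b))  # raises if the custom backward is wrong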
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
import torch.nn.functional as F

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)

def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
        del primals_1
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(16)](buf1, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_2
    return buf1, reinterpret_tensor(primals_3, (4, 4, 4, 4), (16, 64, 4, 1), 0)

class myConv2dFunction(torch.autograd.Function):

    @staticmethod
    def forward(ctx, input, weight, bias):
        ctx.save_for_backward(input, weight, bias)
        return F.conv2d(input, weight, bias)

    @staticmethod
    def backward(ctx, grad_output):
        input, weight, _bias = ctx.saved_tensors
        _out_channels, _in_channels, kernel_height, kernel_width = list(weight.size())
        grad_input = F.conv2d(grad_output, torch.Tensor.rot90(weight, 2, [2, 3]).transpose(0, 1), padding=(kernel_width - 1, kernel_height - 1))
        grad_weight = F.conv2d(input.transpose(0, 1), grad_output.transpose(0, 1)).transpose(0, 1)
        grad_bias = grad_output.sum([0, 2, 3])
        return grad_input, grad_weight, grad_bias

class myConv2dNew(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size):
        super(myConv2dNew, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        sqrtk = math.sqrt(1 / (in_channels * kernel_size[0] * kernel_size[1]))
        self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels, kernel_size[0], kernel_size[1]))
        self.weight.data.uniform_(-sqrtk, sqrtk)
        self.bias = nn.Parameter(torch.Tensor(out_channels))
        self.bias.data.uniform_(-sqrtk, sqrtk)

    def forward(self, input_0):
        primals_1 = self.weight
        primals_2 = self.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
LogCreative/custom-tensor
myConv2d
false
5547
[ "MIT" ]
1
63eccf82821b4d4076a4fdfc7380ee72333360f1
https://github.com/LogCreative/custom-tensor/tree/63eccf82821b4d4076a4fdfc7380ee72333360f1
import math
import torch
import torch.nn as nn
import torch.nn.functional as F

class myConv2dFunction(torch.autograd.Function):

    @staticmethod
    def forward(ctx, input, weight, bias):
        ctx.save_for_backward(input, weight, bias)
        return F.conv2d(input, weight, bias)

    @staticmethod
    def backward(ctx, grad_output):
        input, weight, _bias = ctx.saved_tensors
        _out_channels, _in_channels, kernel_height, kernel_width = list(weight.size())
        grad_input = F.conv2d(grad_output, torch.Tensor.rot90(weight, 2, [2, 3]).transpose(0, 1), padding=(kernel_width - 1, kernel_height - 1))
        grad_weight = F.conv2d(input.transpose(0, 1), grad_output.transpose(0, 1)).transpose(0, 1)
        grad_bias = grad_output.sum([0, 2, 3])
        return grad_input, grad_weight, grad_bias

class Model(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        sqrtk = math.sqrt(1 / (in_channels * kernel_size[0] * kernel_size[1]))
        self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels, kernel_size[0], kernel_size[1]))
        self.weight.data.uniform_(-sqrtk, sqrtk)
        self.bias = nn.Parameter(torch.Tensor(out_channels))
        self.bias.data.uniform_(-sqrtk, sqrtk)

    def forward(self, input):
        return myConv2dFunction.apply(input, self.weight, self.bias)

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [4, 4, [4, 4]]
FCNet
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/tf/ctfobpckmiv3kkga3a6gzs6unuclcnxpb4xc2h5r3udgxgix4ip5.py
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.relu]
# Source node to ATen node mapping:
#   y => relu
# Graph fragment:
#   %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {})
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [y], Original ATen: [aten.relu]
        stream0 = get_raw_stream(0)
        triton_poi_fused_relu_0.run(buf1, primals_3, 16, grid=grid(16), stream=stream0)
        del primals_3
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
        buf3 = buf2; del buf2  # reuse
        # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu]
        triton_poi_fused_relu_0.run(buf3, primals_5, 16, grid=grid(16), stream=stream0)
        del primals_5
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
        del primals_7
    return (buf4, primals_1, buf1, buf3, primals_6, primals_4, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim

class BasicNet:

    def __init__(self, optimizer_fn, gpu, LSTM=False):
        self.gpu = gpu and torch.cuda.is_available()
        self.LSTM = LSTM
        if self.gpu:
            self
            self.FloatTensor = torch.FloatTensor
        else:
            self.FloatTensor = torch.FloatTensor

    def to_torch_variable(self, x, dtype='float32'):
        if isinstance(x, Variable):
            return x
        if not isinstance(x, torch.FloatTensor):
            x = torch.from_numpy(np.asarray(x, dtype=dtype))
        if self.gpu:
            x = x
        return Variable(x)

    def reset(self, terminal):
        if not self.LSTM:
            return
        if terminal:
            self.h.data.zero_()
            self.c.data.zero_()
        self.h = Variable(self.h.data)
        self.c = Variable(self.c.data)

class VanillaNet(BasicNet):

    def predict(self, x, to_numpy=False):
        y = self.forward(x)
        if to_numpy:
            if type(y) is list:
                y = [y_.cpu().data.numpy() for y_ in y]
            else:
                y = y.cpu().data.numpy()
        return y

class FCNet(nn.Module, VanillaNet):

    def __init__(self, dims, optimizer_fn=None, gpu=True):
        super(FCNet, self).__init__()
        self.fc1 = nn.Linear(dims[0], dims[1])
        self.fc2 = nn.Linear(dims[1], dims[2])
        self.fc3 = nn.Linear(dims[2], dims[3])
        BasicNet.__init__(self, optimizer_fn, gpu)

    def forward(self, x):
        x = self.to_torch_variable(x)
        x = x.view(x.size(0), -1)
        y = F.relu(self.fc1(x))
        y = F.relu(self.fc2(y))
        y = self.fc3(y)
        return y

def get_inputs():
    return [torch.rand([4, 4])]

def get_init_inputs():
    return [[], {'dims': [4, 4, 4, 4]}]
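VanillaNet.predict plus BasicNet.to_torch_variable lets the net consume raw numpy arrays directly; a minimal round trip (illustrative sizes; gpu=False keeps everything on CPU):

import numpy as np

net = FCNet([4, 8, 8, 2], gpu=False)
y = net.predict(np.random.rand(4, 4), to_numpy=True)  # numpy in, numpy out
print(type(y), y.shape)                               # <class 'numpy.ndarray'> (4, 2)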
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
from torch.autograd import Variable
import torch.nn as nn
import torch.optim

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)

def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(16)](buf1, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
        buf3 = buf2
        del buf2
        triton_poi_fused_relu_0[grid(16)](buf3, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
        del primals_7
    return buf4, primals_1, buf1, buf3, primals_6, primals_4

class BasicNet:

    def __init__(self, optimizer_fn, gpu, LSTM=False):
        self.gpu = gpu and torch.cuda.is_available()
        self.LSTM = LSTM
        if self.gpu:
            self
            self.FloatTensor = torch.FloatTensor
        else:
            self.FloatTensor = torch.FloatTensor

    def to_torch_variable(self, x, dtype='float32'):
        if isinstance(x, Variable):
            return x
        if not isinstance(x, torch.FloatTensor):
            x = torch.from_numpy(np.asarray(x, dtype=dtype))
        if self.gpu:
            x = x
        return Variable(x)

    def reset(self, terminal):
        if not self.LSTM:
            return
        if terminal:
            self.h.data.zero_()
            self.c.data.zero_()
        self.h = Variable(self.h.data)
        self.c = Variable(self.c.data)

class VanillaNet(BasicNet):

    def predict(self, x, to_numpy=False):
        y = self.forward(x)
        if to_numpy:
            if type(y) is list:
                y = [y_.cpu().data.numpy() for y_ in y]
            else:
                y = y.cpu().data.numpy()
        return y

class FCNetNew(nn.Module, VanillaNet):

    def __init__(self, dims, optimizer_fn=None, gpu=True):
        super(FCNetNew, self).__init__()
        self.fc1 = nn.Linear(dims[0], dims[1])
        self.fc2 = nn.Linear(dims[1], dims[2])
        self.fc3 = nn.Linear(dims[2], dims[3])
        BasicNet.__init__(self, optimizer_fn, gpu)

    def forward(self, input_0):
        primals_1 = input_0
        primals_2 = self.fc1.weight
        primals_3 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.fc3.weight
        primals_7 = self.fc3.bias
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
        return output[0]
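Both hidden layers lower to the same fused kernel: triton_poi_fused_relu_0 is launched twice because each layer is an extern mm followed by an in-place bias-add plus ReLU over the same 4x4 shape. In eager terms each launch computes (placeholder names):

import torch

x = torch.rand(4, 4)
W = torch.rand(4, 4)
b = torch.rand(4)
buf = x @ W.t()               # extern_kernels.mm
y = (buf + b).clamp_min(0)    # triton_poi_fused_relu_0: bias add + max(0, .)
torch.testing.assert_close(y, torch.relu(torch.nn.functional.linear(x, W, b)))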
G-Flor/deeprl
FCNet
false
5548
[ "Apache-2.0" ]
1
aeae2c5d585e5853dc638968b1f090eb60abd351
https://github.com/G-Flor/deeprl/tree/aeae2c5d585e5853dc638968b1f090eb60abd351
import torch
import numpy as np
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim

class BasicNet:

    def __init__(self, optimizer_fn, gpu, LSTM=False):
        self.gpu = gpu and torch.cuda.is_available()
        self.LSTM = LSTM
        if self.gpu:
            self
            self.FloatTensor = torch.FloatTensor
        else:
            self.FloatTensor = torch.FloatTensor

    def to_torch_variable(self, x, dtype='float32'):
        if isinstance(x, Variable):
            return x
        if not isinstance(x, torch.FloatTensor):
            x = torch.from_numpy(np.asarray(x, dtype=dtype))
        if self.gpu:
            x = x
        return Variable(x)

    def reset(self, terminal):
        if not self.LSTM:
            return
        if terminal:
            self.h.data.zero_()
            self.c.data.zero_()
        self.h = Variable(self.h.data)
        self.c = Variable(self.c.data)

class VanillaNet(BasicNet):

    def predict(self, x, to_numpy=False):
        y = self.forward(x)
        if to_numpy:
            if type(y) is list:
                y = [y_.cpu().data.numpy() for y_ in y]
            else:
                y = y.cpu().data.numpy()
        return y

class Model(nn.Module, VanillaNet):

    def __init__(self, dims, optimizer_fn=None, gpu=True):
        super().__init__()
        self.fc1 = nn.Linear(dims[0], dims[1])
        self.fc2 = nn.Linear(dims[1], dims[2])
        self.fc3 = nn.Linear(dims[2], dims[3])
        BasicNet.__init__(self, optimizer_fn, gpu)

    def forward(self, x):
        x = self.to_torch_variable(x)
        x = x.view(x.size(0), -1)
        y = F.relu(self.fc1(x))
        y = F.relu(self.fc2(y))
        y = self.fc3(y)
        return y

def get_inputs():
    return [torch.rand([4, 4])]

def get_init_inputs():
    return [[4, 4, 4, 4]]
Net
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/nc/cncwsucylpsg2zmlivjfxu6vbd64ztxjndlsix2ysjtby3xohgk4.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
#   x_1 => tanh
# Graph fragment:
#   %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0  # reuse
        # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh]
        stream0 = get_raw_stream(0)
        triton_poi_fused_tanh_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del primals_5
    return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, primals_4, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch from numpy import * class Net(torch.nn.Module): def __init__(self, n_feature, n_hidden, n_output): super(Net, self).__init__() self.hidden = torch.nn.Linear(n_feature, n_hidden) self.predict = torch.nn.Linear(n_hidden, n_output) def forward(self, x): x = self.hidden(x) x = torch.tanh(x) x = self.predict(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_feature': 4, 'n_hidden': 4, 'n_output': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from numpy import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, primals_4 class NetNew(torch.nn.Module): def __init__(self, n_feature, n_hidden, n_output): super(NetNew, self).__init__() self.hidden = torch.nn.Linear(n_feature, n_hidden) self.predict = torch.nn.Linear(n_hidden, n_output) def forward(self, input_0): primals_1 = self.hidden.weight primals_2 = self.hidden.bias primals_4 = self.predict.weight primals_5 = self.predict.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
LishudaNoBug/learning_PyTorch
Net
false
5,549
[ "MIT" ]
1
1026035a9cb3d70e2fe97363b532e63db3ca136d
https://github.com/LishudaNoBug/learning_PyTorch/tree/1026035a9cb3d70e2fe97363b532e63db3ca136d
import torch from numpy import * class Model(torch.nn.Module): def __init__(self, n_feature, n_hidden, n_output): super().__init__() self.hidden = torch.nn.Linear(n_feature, n_hidden) self.predict = torch.nn.Linear(n_hidden, n_output) def forward(self, x): x = self.hidden(x) x = torch.tanh(x) x = self.predict(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
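A minimal consistency check for the record above — not part of the dataset itself, and check_net_equivalence is a name introduced here for illustration. It assumes the Net and NetNew classes from this record are in scope and that a CUDA device is available, since the generated kernels are CUDA-only; it verifies the Inductor-compiled wrapper against the eager module once parameters are shared.

import torch

def check_net_equivalence(atol: float = 1e-6) -> bool:
    torch.manual_seed(0)
    # Hyperparameters taken from get_init_inputs() in this record.
    eager = Net(n_feature=4, n_hidden=4, n_output=4).cuda()
    compiled = NetNew(n_feature=4, n_hidden=4, n_output=4).cuda()
    # Copy the parameters so both modules compute the same function; the
    # state-dict keys (hidden.*, predict.*) match by construction.
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')  # shape from get_inputs()
    with torch.no_grad():
        return torch.allclose(eager(x), compiled(x), atol=atol)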
DiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/gd/cgdtd7ki7lurypoeyfwjebdfquygdeupjef4ltfbbbdk5u7owcpl.py # Topologically Sorted Source Nodes: [intersection, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # intersection => mul # sum_1 => sum_1 # sum_2 => sum_2 # sum_3 => sum_3 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view, [1]), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_1, [1]), kwargs = {}) triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp14 = tl.where(xmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tl.store(out_ptr0 + (x0), tmp7, xmask) tl.store(out_ptr1 + (x0), tmp11, xmask) tl.store(out_ptr2 + (x0), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/vq/cvqiixp4wmb73ig2cla6idbqq7i6vd5n3qmdluadrv32f52pdgw3.py # Topologically Sorted Source Nodes: [add, mul_1, add_1, add_2, score, sum_4, truediv_1, score_1], Original ATen: [aten.add, aten.mul, aten.div, aten.sum, aten.rsub] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # mul_1 => mul_1 # score => div # score_1 => sub # sum_4 => sum_4 # truediv_1 => div_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 2.0), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1.0), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_2), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%div,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_4, 4), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_1), kwargs = {}) triton_per_fused_add_div_mul_rsub_sum_1 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 
'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp5 = tl.load(in_ptr1 + (r0), None) tmp6 = tl.load(in_ptr2 + (r0), None) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp7 + tmp1 tmp9 = tmp4 / tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 1)[:, None] tmp13 = 0.25 tmp14 = tmp12 * tmp13 tmp15 = tmp1 - tmp14 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp15, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.float32) buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) buf2 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [intersection, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_mul_sum_0.run(arg1_1, arg0_1, buf0, buf1, buf2, 4, 64, grid=grid(4), stream=stream0) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [add, mul_1, add_1, add_2, score, sum_4, truediv_1, score_1], Original ATen: [aten.add, aten.mul, aten.div, aten.sum, aten.rsub] triton_per_fused_add_div_mul_rsub_sum_1.run(buf4, buf0, buf1, buf2, 1, 4, grid=grid(1), stream=stream0) del buf0 del buf1 del buf2 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from typing import * class DiceLoss(nn.Module): def __init__(self, smooth: 'float'=1.0): super().__init__() self.smooth = smooth def forward(self, logits, targets): num = targets.size(0) probs = torch.sigmoid(logits) m1, m2 = probs.view(num, -1), targets.view(num, -1) intersection = m1 * m2 score = 2.0 * (intersection.sum(1) + self.smooth) / (m1.sum(1) + m2 .sum(1) + self.smooth) score = 1 - score.sum() / num return score def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from typing import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp14 = tl.where(xmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tl.store(out_ptr0 + x0, tmp7, xmask) tl.store(out_ptr1 + x0, tmp11, xmask) tl.store(out_ptr2 + x0, tmp15, xmask) @triton.jit def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + r0, None) tmp6 = tl.load(in_ptr2 + r0, None) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp7 + tmp1 tmp9 = tmp4 / tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 1)[:, None] tmp13 = 0.25 tmp14 = tmp12 * tmp13 tmp15 = tmp1 - tmp14 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) buf1 = empty_strided_cuda((4,), (1,), torch.float32) buf2 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_mul_sum_0[grid(4)](arg1_1, arg0_1, buf0, buf1, buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused_add_div_mul_rsub_sum_1[grid(1)](buf4, buf0, buf1, buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 del buf2 return buf4, class DiceLossNew(nn.Module): def __init__(self, smooth: 'float'=1.0): super().__init__() self.smooth = smooth def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Lleyton-Ariton/landwatch
DiceLoss
false
5,550
[ "MIT" ]
1
21e86e899d33d0ee349cf9bf87c6c13ebdab82fa
https://github.com/Lleyton-Ariton/landwatch/tree/21e86e899d33d0ee349cf9bf87c6c13ebdab82fa
import torch import torch.nn as nn from typing import * class Model(nn.Module): def __init__(self, smooth: 'float'=1.0): super().__init__() self.smooth = smooth def forward(self, logits, targets): num = targets.size(0) probs = torch.sigmoid(logits) m1, m2 = probs.view(num, -1), targets.view(num, -1) intersection = m1 * m2 score = 2.0 * (intersection.sum(1) + self.smooth) / (m1.sum(1) + m2 .sum(1) + self.smooth) score = 1 - score.sum() / num return score def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
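For reference only, a plain-PyTorch restatement of what the two fused kernels in this record compute; it is a sketch, with the smooth=1.0 constant and batch size read off the record itself (the hard-coded 0.25 in the second kernel is 1/num for the 4-sample inputs), and dice_loss_reference is a name introduced here, not part of the record.

import torch

def dice_loss_reference(logits: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
    num = targets.size(0)                     # 4 for the inputs in this record
    m1 = torch.sigmoid(logits).view(num, -1)  # kernel 0 applies the sigmoid inline
    m2 = targets.view(num, -1)
    inter = (m1 * m2).sum(1)                  # buf0 in call()
    s1, s2 = m1.sum(1), m2.sum(1)             # buf1, buf2
    # Kernel 1 hard-codes 2.0, the smooth term 1.0, and 0.25 == 1.0 / num.
    score = 2.0 * (inter + 1.0) / (s1 + s2 + 1.0)
    return 1.0 - score.sum() / num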
KL_Loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/l3/cl3mqwaki56dc4zcxfjjgkbopnejxzhksqm6egdinynmjrsrw2qw.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => exp_1 # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 
16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + (x3), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/pf/cpfkvifhrhobwuxls65xhwdpkryeblqmmtghouii4lp3rhe3crx4.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1), kwargs = {}) # %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [1], True), kwargs = {}) # %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {}) # %div_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 1), kwargs = {}) triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, 
eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/d5/cd5jvj2xtf5vbrydrjr7gjura3e2lbc3fjgbn4anltegd6a6wh3n.py # Topologically Sorted Source Nodes: [softmax, teacher_outputs, kl_div, output_batch, loss], Original ATen: [aten._softmax, aten.add, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.sum, aten.div] # Source node to ATen node mapping: # kl_div => div_3, eq, full_default, full_default_1, isnan, log_1, mul, mul_1, sub_3, sum_3, where, where_1 # loss => mul_2 # output_batch => exp, log, sub_1, sum_1 # softmax => div_2, sum_2 # teacher_outputs => add # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {}) # %add : [num_users=5] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_2, 1e-07), kwargs = {}) # %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%add,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%add, 0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %log_1), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul_1), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default_1, %where), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_1, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %sub_1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mul), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_3,), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, 4), kwargs = {}) # %mul_2 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%div_3, 1), kwargs = {}) triton_per_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2 = async_compile.triton('triton_per_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 10, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = (rindex // 64) tmp0 = tl.load(in_ptr0 + (r3), None) tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (r3), None) tmp20 = tl.load(in_ptr1 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr1 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = 1e-07 tmp10 = tmp8 + tmp9 tmp11 = libdevice.isnan(tmp10).to(tl.int1) tmp12 = 0.0 tmp13 = tmp10 == tmp12 tmp14 = tl_math.log(tmp10) tmp15 = tmp10 * tmp14 tmp16 = tl.where(tmp13, tmp12, tmp15) tmp17 = float("nan") tmp18 = tl.where(tmp11, tmp17, tmp16) tmp21 = tl_math.exp(tmp20) tmp23 = tl_math.exp(tmp22) tmp24 = tmp21 + tmp23 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tmp31 = tl_math.log(tmp30) tmp32 = tmp19 - tmp31 tmp33 = tmp10 * tmp32 tmp34 = tmp18 - tmp33 tmp35 = tl.broadcast_to(tmp34, 
[RBLOCK]) tmp37 = triton_helpers.promote_to_tensor(tl.sum(tmp35, 0)) tmp38 = 0.25 tmp39 = tmp37 * tmp38 tmp40 = 1.0 tmp41 = tmp39 * tmp40 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp41, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(arg0_1, buf2, 256, grid=grid(256), stream=stream0) del arg0_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [softmax, teacher_outputs, kl_div, output_batch, loss], Original ATen: [aten._softmax, aten.add, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.sum, aten.div] triton_per_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2.run(buf4, buf0, buf2, 1, 256, grid=grid(1), stream=stream0) del buf0 del buf2 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn.functional as F import torch.utils class KL_Loss(nn.Module): def __init__(self, temperature=1): super(KL_Loss, self).__init__() self.T = temperature def forward(self, output_batch, teacher_outputs): output_batch = F.log_softmax(output_batch / self.T, dim=1) teacher_outputs = F.softmax(teacher_outputs / self.T, dim=1) + 10 ** -7 loss = self.T * self.T * nn.KLDivLoss(reduction='batchmean')( output_batch, teacher_outputs) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn import torch.utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_per_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr1 + r3, None) tmp20 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp22 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = 
tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = 1e-07 tmp10 = tmp8 + tmp9 tmp11 = libdevice.isnan(tmp10).to(tl.int1) tmp12 = 0.0 tmp13 = tmp10 == tmp12 tmp14 = tl_math.log(tmp10) tmp15 = tmp10 * tmp14 tmp16 = tl.where(tmp13, tmp12, tmp15) tmp17 = float('nan') tmp18 = tl.where(tmp11, tmp17, tmp16) tmp21 = tl_math.exp(tmp20) tmp23 = tl_math.exp(tmp22) tmp24 = tmp21 + tmp23 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tmp31 = tl_math.log(tmp30) tmp32 = tmp19 - tmp31 tmp33 = tmp10 * tmp32 tmp34 = tmp18 - tmp33 tmp35 = tl.broadcast_to(tmp34, [RBLOCK]) tmp37 = triton_helpers.promote_to_tensor(tl.sum(tmp35, 0)) tmp38 = 0.25 tmp39 = tmp37 * tmp38 tmp40 = 1.0 tmp41 = tmp39 * tmp40 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp41, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](arg0_1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_2[grid (1)](buf4, buf0, buf2, 1, 256, num_warps=2, num_stages=1) del buf0 del buf2 return buf4, class KL_LossNew(nn.Module): def __init__(self, temperature=1): super(KL_LossNew, self).__init__() self.T = temperature def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Little0o0/FedML
KL_Loss
false
5,551
[ "Apache-2.0" ]
1
720015c90fcfec88d465a81b1e8fb45676dce9fb
https://github.com/Little0o0/FedML/tree/720015c90fcfec88d465a81b1e8fb45676dce9fb
import torch from torch import nn import torch.nn.functional as F import torch.utils class Model(nn.Module): def __init__(self, temperature=1): super().__init__() self.T = temperature def forward(self, output_batch, teacher_outputs): output_batch = F.log_softmax(output_batch / self.T, dim=1) teacher_outputs = F.softmax(teacher_outputs / self.T, dim=1) + 10 ** -7 loss = self.T * self.T * nn.KLDivLoss(reduction='batchmean')( output_batch, teacher_outputs) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
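Likewise a reference-only sketch of the fused KL kernel chain above, written with torch.nn.functional; the constants are read off the record (the 1e-07 epsilon added to the teacher softmax, 0.25 == 1/batch from the 'batchmean' reduction, and the trailing T*T multiply, a no-op for the default T=1), and kl_loss_reference is a name introduced here.

import torch
import torch.nn.functional as F

def kl_loss_reference(student: torch.Tensor, teacher: torch.Tensor, T: float = 1.0) -> torch.Tensor:
    log_p = F.log_softmax(student / T, dim=1)
    q = F.softmax(teacher / T, dim=1) + 1e-07  # the epsilon baked into the fused kernel
    # F.kl_div with reduction='batchmean' sums target*(log(target) - input) and
    # divides by the batch size -- the xlogy/sub/0.25 sequence in kernel 2.
    return T * T * F.kl_div(log_p, q, reduction='batchmean')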
CE_Loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/jh/cjhafiazvhnqzahvgpeyzzxgeb5atp7ebiv4plitnblial63qxb6.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1), kwargs = {}) # %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [1], True), kwargs = {}) # %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {}) # %div_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 1), kwargs = {}) triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/k4/ck4jcdo7pholu6j7hyu5a5wtavlli6c52bnxxxqwlta7ah2dps4s.py # Topologically Sorted Source Nodes: [teacher_outputs], Original ATen: [aten._softmax] # Source node to ATen node mapping: # teacher_outputs => exp_1 # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, 
eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + (x3), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/mn/cmnlidmjpz2yihyn6sanrop2yyzhcy2ynecgwnndb5sydmmmfwas.py # Topologically Sorted Source Nodes: [output_batch, teacher_outputs, mul, sum_1, mul_1, loss], Original ATen: [aten._log_softmax, aten._softmax, aten.mul, aten.sum, aten.div] # Source node to ATen node mapping: # loss => div_3 # mul => mul # mul_1 => mul_1 # output_batch => exp, log, sub_1, sum_1 # sum_1 => sum_3 # teacher_outputs => div_2, sum_2 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_1, %log), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %div_2), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_3, -1), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, 4), kwargs = {}) triton_per_fused__log_softmax__softmax_div_mul_sum_2 = async_compile.triton('triton_per_fused__log_softmax__softmax_div_mul_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax__softmax_div_mul_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 10, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax__softmax_div_mul_sum_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = (rindex // 64) tmp0 = tl.load(in_ptr0 + (r3), None) tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (r3), None) tmp15 = tl.load(in_ptr1 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp17 = tmp15 + tmp16 tmp19 = tmp17 + tmp18 tmp21 = tmp19 + tmp20 tmp22 = tmp14 / tmp21 tmp23 = tmp13 * tmp22 tmp24 = tl.broadcast_to(tmp23, [RBLOCK]) tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0)) tmp27 = -1.0 tmp28 = tmp26 * tmp27 tmp29 = 0.25 tmp30 = tmp28 * tmp29 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp30, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [teacher_outputs], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(arg1_1, buf1, 256, grid=grid(256), stream=stream0) del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [output_batch, teacher_outputs, mul, sum_1, mul_1, loss], Original ATen: [aten._log_softmax, aten._softmax, aten.mul, aten.sum, aten.div] triton_per_fused__log_softmax__softmax_div_mul_sum_2.run(buf4, buf0, buf1, 1, 256, grid=grid(1), stream=stream0) del buf0 del buf1 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), 
device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn.functional as F import torch.utils class CE_Loss(nn.Module): def __init__(self, temperature=1): super(CE_Loss, self).__init__() self.T = temperature def forward(self, output_batch, teacher_outputs): output_batch = F.log_softmax(output_batch / self.T, dim=1) teacher_outputs = F.softmax(teacher_outputs / self.T, dim=1) loss = -self.T * self.T * torch.sum(torch.mul(output_batch, teacher_outputs)) / teacher_outputs.size(0) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn import torch.utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_per_fused__log_softmax__softmax_div_mul_sum_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + r3, None) tmp15 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp18 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 
tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp17 = tmp15 + tmp16 tmp19 = tmp17 + tmp18 tmp21 = tmp19 + tmp20 tmp22 = tmp14 / tmp21 tmp23 = tmp13 * tmp22 tmp24 = tl.broadcast_to(tmp23, [RBLOCK]) tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0)) tmp27 = -1.0 tmp28 = tmp26 * tmp27 tmp29 = 0.25 tmp30 = tmp28 * tmp29 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp30, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](arg1_1, buf1, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused__log_softmax__softmax_div_mul_sum_2[grid(1)](buf4, buf0, buf1, 1, 256, num_warps=2, num_stages=1) del buf0 del buf1 return buf4, class CE_LossNew(nn.Module): def __init__(self, temperature=1): super(CE_LossNew, self).__init__() self.T = temperature def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Little0o0/FedML
CE_Loss
false
5,552
[ "Apache-2.0" ]
1
720015c90fcfec88d465a81b1e8fb45676dce9fb
https://github.com/Little0o0/FedML/tree/720015c90fcfec88d465a81b1e8fb45676dce9fb
import torch from torch import nn import torch.nn.functional as F import torch.utils class Model(nn.Module): def __init__(self, temperature=1): super().__init__() self.T = temperature def forward(self, output_batch, teacher_outputs): output_batch = F.log_softmax(output_batch / self.T, dim=1) teacher_outputs = F.softmax(teacher_outputs / self.T, dim=1) loss = -self.T * self.T * torch.sum(torch.mul(output_batch, teacher_outputs)) / teacher_outputs.size(0) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
SENet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/nc/cncwsucylpsg2zmlivjfxu6vbd64ztxjndlsix2ysjtby3xohgk4.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.tanh] # Source node to ATen node mapping: # out_1 => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', 
device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.tanh] triton_poi_fused_tanh_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0) del primals_5 return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class SENet(nn.Module): """support estimation network""" def __init__(self, input_size: 'int', hidden_size: 'int', output_dims: 'int') ->None: super(SENet, self).__init__() self.l_1 = nn.Linear(input_size, hidden_size) self.l_2 = nn.Linear(hidden_size, output_dims) self.act = nn.Tanh() def forward(self, x: 'torch.Tensor') ->torch.Tensor: out = self.l_1(x) out = self.act(out) out = self.l_2(out) out = self.act(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4, 'output_dims': 4}]
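A minimal usage sketch for the module above (hypothetical values; shapes follow get_inputs/get_init_inputs): both Linear layers act on the last dimension, and the final Tanh bounds every output.

import torch

net = SENet(input_size=4, hidden_size=4, output_dims=4)
x = torch.rand(4, 4, 4, 4)
out = net(x)
assert out.shape == (4, 4, 4, 4)   # leading dims preserved, last dim = output_dims
assert out.abs().max() <= 1.0      # tanh keeps outputs within [-1, 1]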
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_tanh_0[grid(256)](buf3, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf3, primals_4 class SENetNew(nn.Module): """support estimation network""" def __init__(self, input_size: 'int', hidden_size: 'int', output_dims: 'int') ->None: super(SENetNew, self).__init__() self.l_1 = nn.Linear(input_size, hidden_size) self.l_2 = nn.Linear(hidden_size, output_dims) self.act = nn.Tanh() def forward(self, input_0): primals_1 = self.l_1.weight primals_2 = self.l_1.bias primals_4 = self.l_2.weight primals_5 = self.l_2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
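For orientation, an eager restatement of what triton_poi_fused_tanh_0 above computes in place on each matmul output (a sketch, not generated code): the bias is broadcast over the last dimension of size 4 (x0 = xindex % 4) and tanh is applied in the same fused pass.

import torch

def fused_tanh_reference(buf: torch.Tensor, bias: torch.Tensor) -> torch.Tensor:
    # buf: the (64, 4) matmul output, later viewed as (4, 4, 4, 4); bias: (4,)
    return torch.tanh(buf + bias)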
L-Net-1992/DI-engine
SENet
false
5,553
[ "Apache-2.0" ]
1
06803b4e18fa64bbed0fd1d44952242c0c063b0f
https://github.com/L-Net-1992/DI-engine/tree/06803b4e18fa64bbed0fd1d44952242c0c063b0f
import torch import torch.nn as nn import torch.utils.data class Model(nn.Module): """support estimation network""" def __init__(self, input_size: 'int', hidden_size: 'int', output_dims: 'int') ->None: super().__init__() self.l_1 = nn.Linear(input_size, hidden_size) self.l_2 = nn.Linear(hidden_size, output_dims) self.act = nn.Tanh() def forward(self, x: 'torch.Tensor') ->torch.Tensor: out = self.l_1(x) out = self.act(out) out = self.l_2(out) out = self.act(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
MAPELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/57/c577ica3v5sskq3ryzudbubbezlo2d426ruphdt5ra7ufqarb4m5.py # Topologically Sorted Source Nodes: [sub, abs_1, abs_2, add, truediv, mean], Original ATen: [aten.sub, aten.abs, aten.add, aten.div, aten.mean] # Source node to ATen node mapping: # abs_1 => abs_1 # abs_2 => abs_2 # add => add # mean => mean # sub => sub # truediv => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%abs_2, 0.01), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%abs_1, %add), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {}) triton_per_fused_abs_add_div_mean_sub_0 = async_compile.triton('triton_per_fused_abs_add_div_mean_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_div_mean_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_add_div_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl_math.abs(tmp1) tmp5 = 0.01 tmp6 = tmp4 + tmp5 tmp7 = tmp3 / tmp6 tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp11 = 256.0 tmp12 = tmp10 / tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, abs_1, abs_2, add, truediv, mean], Original ATen: [aten.sub, aten.abs, aten.add, aten.div, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_abs_add_div_mean_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class MAPELoss(nn.Module): def forward(self, input, target): return (torch.abs(input - target) / (torch.abs(target) + 0.01)).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
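A minimal sanity check for the loss above (hypothetical values): the 0.01 added to the denominator keeps the ratio finite when the target is exactly zero.

import torch

criterion = MAPELoss()
pred = torch.full((2, 2), 0.5)
target = torch.zeros(2, 2)
loss = criterion(pred, target)  # each element: |0.5 - 0| / (0 + 0.01) = 50.0
assert torch.isclose(loss, torch.tensor(50.0))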
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_div_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl_math.abs(tmp1) tmp5 = 0.01 tmp6 = tmp4 + tmp5 tmp7 = tmp3 / tmp6 tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp11 = 256.0 tmp12 = tmp10 / tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_add_div_mean_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class MAPELossNew(nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
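A hedged equivalence check between the eager module and the compiled wrapper above (a sketch, assuming both classes are in scope, a CUDA device is available, and inputs are contiguous float32 tensors of shape (4, 4, 4, 4), which is what assert_size_stride in call requires):

import torch

a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
eager = MAPELoss()(a, b)
fused = MAPELossNew()(a, b)
assert torch.allclose(eager, fused, atol=1e-6)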
LongerVision/oidn
MAPELoss
false
5,554
[ "Apache-2.0" ]
1
2f9e59f8b747b217f78c5c274f4f2bff347a03a7
https://github.com/LongerVision/oidn/tree/2f9e59f8b747b217f78c5c274f4f2bff347a03a7
import torch import torch.nn as nn class Model(nn.Module): def forward(self, input, target): return (torch.abs(input - target) / (torch.abs(target) + 0.01)).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
MultiheadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/qw/cqw7yoyglmtjad3kirznl5odetqfs3k6pjtnfdbzklyhsdvuvgft.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] # Source node to ATen node mapping: # multi_head_attention_forward => mul # Graph fragment: # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 
tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/hz/chzi3aam26mikdhljz5x7jlqazm7kpktzeptsf36thgfhsg7ub6a.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] # Source node to ATen node mapping: # multi_head_attention_forward => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/em/cem6qbxwbiqnjqybzk5arf2obt5uggy4qs7otwwpovvnrhvdc6h4.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] # Source node to ATen node mapping: # multi_head_attention_forward => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = 
async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/rh/crhjfwyl6xoj5ylcsbbh6lp2vlegits2zkdej3b3wb2q4ddfnejv.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] # Source node to ATen node mapping: # multi_head_attention_forward => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 
'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask) tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/5z/c5zy7julai2lhuinuwjgyl62nx7cyws6ni5poe5jzp7qn532rcgh.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {}) triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() 
assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (12, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_3, (4, ), (1, ), 4), primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_3, (4, ), (1, ), 8), primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2) del primals_2 buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(buf3, primals_3, 16, grid=grid(16), stream=stream0) del primals_3 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0) buf6 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf5, buf6, 64, grid=grid(64), stream=stream0) del buf5 buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf7, buf8, 4, 4, grid=grid(4, 4), stream=stream0) buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9) buf10 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] triton_poi_fused_add_4.run(buf10, primals_1, primals_5, 16, grid=grid(16), stream=stream0) del primals_5 return (buf10, primals_1, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), primals_4, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = 
rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
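The triton_poi_fused__softmax_1 / triton_poi_fused__softmax_2 pair in the dump above splits the numerically stable softmax over the last dimension into two passes; an eager restatement for orientation (a sketch, not generated code):

import torch

def stable_softmax(x: torch.Tensor) -> torch.Tensor:
    e = torch.exp(x - x.amax(dim=-1, keepdim=True))  # pass 1: subtract the row max
    return e / e.sum(dim=-1, keepdim=True)           # pass 2: normalize by the row sum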
import torch
import torch.nn as nn


class MultiheadAttention(nn.Module):
    """A wrapper for torch.nn.MultiheadAttention.

    This module implements MultiheadAttention with residual connection.

    Args:
        embed_dims (int): The embedding dimension.
        num_heads (int): Parallel attention heads. Same as
            `nn.MultiheadAttention`.
        attn_drop (float): A Dropout layer on attn_output_weights. Default 0.0.
        proj_drop (float): The dropout rate after attention. Default 0.0.
    """

    def __init__(self, embed_dims, num_heads, attn_drop=0.0, proj_drop=0.0):
        super(MultiheadAttention, self).__init__()
        assert embed_dims % num_heads == 0, f'embed_dims must be divisible by num_heads. got {embed_dims} and {num_heads}.'
        self.embed_dims = embed_dims
        self.num_heads = num_heads
        self.attn = nn.MultiheadAttention(embed_dims, num_heads, attn_drop)
        self.dropout = nn.Dropout(proj_drop)

    def forward(self, x, key=None, value=None, residual=None, query_pos=
        None, key_pos=None, attn_mask=None, key_padding_mask=None):
        """Forward function for `MultiheadAttention`.

        Args:
            x (Tensor): The input query with shape [num_query, bs,
                embed_dims]. Same in `nn.MultiheadAttention.forward`.
            key (Tensor): The key tensor with shape [num_key, bs,
                embed_dims]. Same in `nn.MultiheadAttention.forward`.
                Default None. If None, the `query` will be used.
            value (Tensor): The value tensor with same shape as `key`.
                Same in `nn.MultiheadAttention.forward`. Default None.
                If None, the `key` will be used.
            residual (Tensor): The tensor used for addition, with the
                same shape as `x`. Default None. If None, `x` will be used.
            query_pos (Tensor): The positional encoding for query, with
                the same shape as `x`. Default None. If not None, it will
                be added to `x` before forward function.
            key_pos (Tensor): The positional encoding for `key`, with the
                same shape as `key`. Default None. If not None, it will
                be added to `key` before forward function. If None, and
                `query_pos` has the same shape as `key`, then `query_pos`
                will be used for `key_pos`.
            attn_mask (Tensor): ByteTensor mask with shape [num_query,
                num_key]. Same in `nn.MultiheadAttention.forward`.
                Default None.
            key_padding_mask (Tensor): ByteTensor with shape [bs, num_key].
                Same in `nn.MultiheadAttention.forward`. Default None.

        Returns:
            Tensor: forwarded results with shape [num_query, bs, embed_dims].
        """
        query = x
        if key is None:
            key = query
        if value is None:
            value = key
        if residual is None:
            residual = x
        if key_pos is None:
            if query_pos is not None and key is not None:
                if query_pos.shape == key.shape:
                    key_pos = query_pos
        if query_pos is not None:
            query = query + query_pos
        if key_pos is not None:
            key = key + key_pos
        out = self.attn(query, key, value=value, attn_mask=attn_mask,
            key_padding_mask=key_padding_mask)[0]
        return residual + self.dropout(out)


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'embed_dims': 4, 'num_heads': 4}]
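A minimal usage sketch for the wrapper above (hypothetical shapes, sequence-first layout as the docstring specifies): with query_pos given and no explicit key, the same positional encoding is reused for the keys, and the residual connection preserves the input shape.

import torch

attn = MultiheadAttention(embed_dims=4, num_heads=4)
x = torch.rand(4, 2, 4)            # [num_query, bs, embed_dims]
query_pos = torch.rand(4, 2, 4)    # also reused as key_pos (same shape as key)
out = attn(x, query_pos=query_pos)
assert out.shape == x.shape        # residual + dropout(attn_out)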
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_out_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (12, 4), (4, 1))
    assert_size_stride(primals_3, (12,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
            (1, 4), 0), out=buf0)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 4),
            primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16),
            alpha=1, beta=1, out=buf1)
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 8),
            primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32),
            alpha=1, beta=1, out=buf2)
        del primals_2
        buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(16)](buf3, primals_3, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_3
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1,
            4), 0), out=buf4)
        buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf6 = buf4
        del buf4
        triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf5
        buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4,
            1), 0), out=buf7)
        buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        triton_poi_fused_clone_3[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4,
            YBLOCK=4, num_warps=1, num_stages=1)
        buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0)
        del buf7
        extern_kernels.mm(reinterpret_tensor(buf8, (4, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9)
        buf10 = buf9
        del buf9
        triton_poi_fused_add_4[grid(16)](buf10, primals_1, primals_5, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_5
    return buf10, primals_1, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1), 0
        ), primals_4, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0
        ), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0
        ), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0)


class MultiheadAttentionNew(nn.Module):
    """A wrapper for torch.nn.MultiheadAttention.

    This module implements MultiheadAttention with residual connection.

    Args:
        embed_dims (int): The embedding dimension.
        num_heads (int): Parallel attention heads. Same as
            `nn.MultiheadAttention`.
        attn_drop (float): A Dropout layer on attn_output_weights. Default 0.0.
        proj_drop (float): The dropout rate after attention. Default 0.0.
    """

    def __init__(self, embed_dims, num_heads, attn_drop=0.0, proj_drop=0.0):
        super(MultiheadAttentionNew, self).__init__()
        assert embed_dims % num_heads == 0, f'embed_dims must be divisible by num_heads. got {embed_dims} and {num_heads}.'
        self.embed_dims = embed_dims
        self.num_heads = num_heads
        self.attn = nn.MultiheadAttention(embed_dims, num_heads, attn_drop)
        self.dropout = nn.Dropout(proj_drop)

    def forward(self, input_0):
        primals_2 = self.attn.in_proj_weight
        primals_3 = self.attn.in_proj_bias
        primals_1 = self.attn.out_proj.weight
        primals_5 = self.attn.out_proj.bias
        primals_4 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
LiuXiaoxuanPKU/actnn-mmcls
MultiheadAttention
false
5,555
[ "Apache-2.0" ]
1
c97d1116d54ddb3f9b1e51baebe25ffb2b3f7b75
https://github.com/LiuXiaoxuanPKU/actnn-mmcls/tree/c97d1116d54ddb3f9b1e51baebe25ffb2b3f7b75
import torch
import torch.nn as nn


class Model(nn.Module):
    """A wrapper for torch.nn.MultiheadAttention.

    This module implements MultiheadAttention with residual connection.

    Args:
        embed_dims (int): The embedding dimension.
        num_heads (int): Parallel attention heads. Same as
            `nn.MultiheadAttention`.
        attn_drop (float): A Dropout layer on attn_output_weights. Default 0.0.
        proj_drop (float): The dropout rate after attention. Default 0.0.
    """

    def __init__(self, embed_dims, num_heads, attn_drop=0.0, proj_drop=0.0):
        super().__init__()
        assert embed_dims % num_heads == 0, f'embed_dims must be divisible by num_heads. got {embed_dims} and {num_heads}.'
        self.embed_dims = embed_dims
        self.num_heads = num_heads
        self.attn = nn.MultiheadAttention(embed_dims, num_heads, attn_drop)
        self.dropout = nn.Dropout(proj_drop)

    def forward(self, x, key=None, value=None, residual=None, query_pos=
        None, key_pos=None, attn_mask=None, key_padding_mask=None):
        """Forward function for `MultiheadAttention`.

        Args:
            x (Tensor): The input query with shape [num_query, bs,
                embed_dims]. Same in `nn.MultiheadAttention.forward`.
            key (Tensor): The key tensor with shape [num_key, bs,
                embed_dims]. Same in `nn.MultiheadAttention.forward`.
                Default None. If None, the `query` will be used.
            value (Tensor): The value tensor with same shape as `key`.
                Same in `nn.MultiheadAttention.forward`. Default None.
                If None, the `key` will be used.
            residual (Tensor): The tensor used for addition, with the
                same shape as `x`. Default None. If None, `x` will be used.
            query_pos (Tensor): The positional encoding for query, with
                the same shape as `x`. Default None. If not None, it will
                be added to `x` before forward function.
            key_pos (Tensor): The positional encoding for `key`, with the
                same shape as `key`. Default None. If not None, it will
                be added to `key` before forward function. If None, and
                `query_pos` has the same shape as `key`, then `query_pos`
                will be used for `key_pos`.
            attn_mask (Tensor): ByteTensor mask with shape [num_query,
                num_key]. Same in `nn.MultiheadAttention.forward`.
                Default None.
            key_padding_mask (Tensor): ByteTensor with shape [bs, num_key].
                Same in `nn.MultiheadAttention.forward`. Default None.

        Returns:
            Tensor: forwarded results with shape [num_query, bs, embed_dims].
        """
        query = x
        if key is None:
            key = query
        if value is None:
            value = key
        if residual is None:
            residual = x
        if key_pos is None:
            if query_pos is not None and key is not None:
                if query_pos.shape == key.shape:
                    key_pos = query_pos
        if query_pos is not None:
            query = query + query_pos
        if key_pos is not None:
            key = key + key_pos
        out = self.attn(query, key, value=value, attn_mask=attn_mask,
            key_padding_mask=key_padding_mask)[0]
        return residual + self.dropout(out)


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [4, 4]
SMAPELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/l5/cl5owl63sm4p4y6vlb33daka3yidfydimcncohmywgjaogtb5o5f.py # Topologically Sorted Source Nodes: [sub, abs_1, abs_2, abs_3, add, add_1, truediv, mean], Original ATen: [aten.sub, aten.abs, aten.add, aten.div, aten.mean] # Source node to ATen node mapping: # abs_1 => abs_1 # abs_2 => abs_2 # abs_3 => abs_3 # add => add # add_1 => add_1 # mean => mean # sub => sub # truediv => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {}) # %abs_3 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%abs_2, %abs_3), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 0.01), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%abs_1, %add_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {}) triton_per_fused_abs_add_div_mean_sub_0 = async_compile.triton('triton_per_fused_abs_add_div_mean_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_div_mean_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_add_div_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl_math.abs(tmp0) tmp5 = tl_math.abs(tmp1) tmp6 = tmp4 + tmp5 tmp7 = 0.01 tmp8 = tmp6 + tmp7 tmp9 = tmp3 / tmp8 tmp10 = tl.broadcast_to(tmp9, [RBLOCK]) tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0)) tmp13 = 256.0 tmp14 = tmp12 / tmp13 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp14, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, abs_1, abs_2, abs_3, add, add_1, truediv, mean], Original ATen: [aten.sub, aten.abs, aten.add, aten.div, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_abs_add_div_mean_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class SMAPELoss(nn.Module): def forward(self, input, target): return (torch.abs(input - target) / (torch.abs(input) + torch.abs( target) + 0.01)).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
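The loss in this record is a smoothed symmetric mean absolute percentage error. As an editorial restatement of the code above (not a dataset field), with N the number of tensor elements and 0.01 a constant keeping the denominator away from zero:

\mathrm{SMAPE}(x, y) = \frac{1}{N} \sum_{i=1}^{N} \frac{\lvert x_i - y_i \rvert}{\lvert x_i \rvert + \lvert y_i \rvert + 0.01}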
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_div_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl_math.abs(tmp0) tmp5 = tl_math.abs(tmp1) tmp6 = tmp4 + tmp5 tmp7 = 0.01 tmp8 = tmp6 + tmp7 tmp9 = tmp3 / tmp8 tmp10 = tl.broadcast_to(tmp9, [RBLOCK]) tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0)) tmp13 = 256.0 tmp14 = tmp12 / tmp13 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_add_div_mean_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class SMAPELossNew(nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
LongerVision/oidn
SMAPELoss
false
5556
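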
[ "Apache-2.0" ]
1
2f9e59f8b747b217f78c5c274f4f2bff347a03a7
https://github.com/LongerVision/oidn/tree/2f9e59f8b747b217f78c5c274f4f2bff347a03a7
import torch import torch.nn as nn class Model(nn.Module): def forward(self, input, target): return (torch.abs(input - target) / (torch.abs(input) + torch.abs( target) + 0.01)).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
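A minimal sanity-check sketch for this record (an illustration, not a dataset field). It assumes the triton_code field above has been executed in the current namespace and that a CUDA device is available; the fused kernel hard-codes a 256-element reduction, so the (4, 4, 4, 4) input shape from get_inputs() is required.

import torch

def smape_eager(input, target):
    # Eager reference, same formula as the SMAPELoss module in this record.
    return (torch.abs(input - target) / (torch.abs(input) + torch.abs(target) + 0.01)).mean()

if torch.cuda.is_available():
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    # SMAPELossNew is defined in the triton_code field of this record.
    assert torch.allclose(smape_eager(a, b), SMAPELossNew()(a, b), atol=1e-6)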
GradientLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/mc/cmcqirvp2mlpcb2uj7frf6qjesljm7rlah5n553gwdx35dyjovhe.py # Topologically Sorted Source Nodes: [cat, cat_1, sub_4, abs_1, mean], Original ATen: [aten.cat, aten.sub, aten.abs, aten.mean] # Source node to ATen node mapping: # abs_1 => abs_1 # cat => cat # cat_1 => cat_1 # mean => mean # sub_4 => sub_4 # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sub, %sub_1], -3), kwargs = {}) # %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sub_2, %sub_3], -3), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%cat, %cat_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_4,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {}) triton_per_fused_abs_cat_mean_sub_0 = async_compile.triton('triton_per_fused_abs_cat_mean_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 512], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_cat_mean_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_cat_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 288 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = rindex < rnumel r2 = (rindex // 9) % 8 r0 = rindex % 3 r1 = (rindex // 3) % 3 r3 = (rindex // 72) tmp0 = r2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (tl.broadcast_to(4 + r0 + (4*r1) + (16*r2) + (64*r3), [RBLOCK])), rmask & tmp4, other=0.0) tmp6 = tl.load(in_ptr0 + (tl.broadcast_to(r0 + (4*r1) + (16*r2) + (64*r3), [RBLOCK])), rmask & tmp4, other=0.0) tmp7 = tmp5 - tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 8, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tl.load(in_ptr0 + (tl.broadcast_to(1 + r0 + (4*r1) + (16*((-4) + r2)) + (64*r3), [RBLOCK])), rmask & tmp10, other=0.0) tmp14 = tl.load(in_ptr0 + (tl.broadcast_to(r0 + (4*r1) + (16*((-4) + r2)) + (64*r3), [RBLOCK])), rmask & tmp10, other=0.0) tmp15 = tmp13 - tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp10, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp9, tmp17) tmp19 = tl.load(in_ptr1 + (tl.broadcast_to(4 + r0 + (4*r1) + (16*r2) + (64*r3), [RBLOCK])), rmask & tmp4, other=0.0) tmp20 = tl.load(in_ptr1 + (tl.broadcast_to(r0 + (4*r1) + (16*r2) + (64*r3), [RBLOCK])), rmask & tmp4, other=0.0) tmp21 = tmp19 - tmp20 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp4, tmp21, tmp22) tmp24 = tl.load(in_ptr1 + (tl.broadcast_to(1 + r0 + (4*r1) + (16*((-4) + r2)) + (64*r3), [RBLOCK])), rmask & tmp10, other=0.0) tmp25 = tl.load(in_ptr1 + (tl.broadcast_to(r0 + (4*r1) + (16*((-4) + r2)) + (64*r3), [RBLOCK])), rmask & tmp10, other=0.0) tmp26 = tmp24 - tmp25 tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp10, tmp26, tmp27) tmp29 = tl.where(tmp4, tmp23, tmp28) tmp30 = tmp18 - tmp29 tmp31 = tl_math.abs(tmp30) tmp32 = tl.broadcast_to(tmp31, [RBLOCK]) tmp34 = tl.where(rmask, tmp32, 0) tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp34, 0)) tmp36 = 288.0 tmp37 = tmp35 / tmp36 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp37, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [cat, cat_1, sub_4, abs_1, mean], Original ATen: [aten.cat, aten.sub, aten.abs, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_abs_cat_mean_sub_0.run(buf1, arg0_1, arg1_1, 1, 288, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 
4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn def gradient(input): input0 = input[..., :-1, :-1] didy = input[..., 1:, :-1] - input0 didx = input[..., :-1, 1:] - input0 return torch.cat((didy, didx), -3) class GradientLoss(nn.Module): def forward(self, input, target): return torch.abs(gradient(input) - gradient(target)).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
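The loss in this record compares forward-difference image gradients. As an editorial restatement of the code above (not a dataset field): g crops the last row and column, takes the vertical and horizontal differences, and concatenates them along the channel axis, after which the loss is a plain L1 mean:

g(I)_{i,j} = \bigl(I_{i+1,\,j} - I_{i,\,j},\; I_{i,\,j+1} - I_{i,\,j}\bigr), \qquad \mathcal{L}(x, y) = \operatorname{mean}\,\bigl\lvert g(x) - g(y) \bigr\rvert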
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_cat_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 rnumel = 288 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] rmask = rindex < rnumel r2 = rindex // 9 % 8 r0 = rindex % 3 r1 = rindex // 3 % 3 r3 = rindex // 72 tmp0 = r2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + tl.broadcast_to(4 + r0 + 4 * r1 + 16 * r2 + 64 * r3, [RBLOCK]), rmask & tmp4, other=0.0) tmp6 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * r1 + 16 * r2 + 64 * r3, [RBLOCK]), rmask & tmp4, other=0.0) tmp7 = tmp5 - tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp13 = tl.load(in_ptr0 + tl.broadcast_to(1 + r0 + 4 * r1 + 16 * (-4 + r2) + 64 * r3, [RBLOCK]), rmask & tmp10, other=0.0) tmp14 = tl.load(in_ptr0 + tl.broadcast_to(r0 + 4 * r1 + 16 * (-4 + r2) + 64 * r3, [RBLOCK]), rmask & tmp10, other=0.0) tmp15 = tmp13 - tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp10, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp9, tmp17) tmp19 = tl.load(in_ptr1 + tl.broadcast_to(4 + r0 + 4 * r1 + 16 * r2 + 64 * r3, [RBLOCK]), rmask & tmp4, other=0.0) tmp20 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * r1 + 16 * r2 + 64 * r3, [RBLOCK]), rmask & tmp4, other=0.0) tmp21 = tmp19 - tmp20 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp4, tmp21, tmp22) tmp24 = tl.load(in_ptr1 + tl.broadcast_to(1 + r0 + 4 * r1 + 16 * (-4 + r2) + 64 * r3, [RBLOCK]), rmask & tmp10, other=0.0) tmp25 = tl.load(in_ptr1 + tl.broadcast_to(r0 + 4 * r1 + 16 * (-4 + r2) + 64 * r3, [RBLOCK]), rmask & tmp10, other=0.0) tmp26 = tmp24 - tmp25 tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp10, tmp26, tmp27) tmp29 = tl.where(tmp4, tmp23, tmp28) tmp30 = tmp18 - tmp29 tmp31 = tl_math.abs(tmp30) tmp32 = tl.broadcast_to(tmp31, [RBLOCK]) tmp34 = tl.where(rmask, tmp32, 0) tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp34, 0)) tmp36 = 288.0 tmp37 = tmp35 / tmp36 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp37, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_cat_mean_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 288, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf1, def gradient(input): input0 = input[..., :-1, :-1] didy = input[..., 1:, :-1] - input0 didx = input[..., :-1, 1:] - input0 return torch.cat((didy, didx), -3) class GradientLossNew(nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
LongerVision/oidn
GradientLoss
false
5557
[ "Apache-2.0" ]
1
2f9e59f8b747b217f78c5c274f4f2bff347a03a7
https://github.com/LongerVision/oidn/tree/2f9e59f8b747b217f78c5c274f4f2bff347a03a7
import torch import torch.nn as nn def gradient(input): input0 = input[..., :-1, :-1] didy = input[..., 1:, :-1] - input0 didx = input[..., :-1, 1:] - input0 return torch.cat((didy, didx), -3) class Model(nn.Module): def forward(self, input, target): return torch.abs(gradient(input) - gradient(target)).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
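A small shape-arithmetic sketch (an illustration, not a dataset field) showing where the hard-coded rnumel = 288 and the 288.0 mean divisor in this record's kernel come from. It assumes the python_code field above has been executed so that gradient is defined.

import torch

x = torch.rand(4, 4, 4, 4)
g = gradient(x)   # didy and didx are each (4, 4, 3, 3); cat on dim -3 gives (4, 8, 3, 3)
print(g.shape)    # torch.Size([4, 8, 3, 3])
print(g.numel())  # 288 == 4 * 8 * 3 * 3, the reduction length baked into the kernel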
ResHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean] # Source node to ATen node mapping: # x => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + 
(r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0) del primals_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_2 del primals_3 return (buf2, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import torch import torch.nn as nn import torch.utils.data def gap2d(_w_in): """Helper for building a gap2d layer.""" return nn.AdaptiveAvgPool2d((1, 1)) def gap2d_cx(cx, _w_in): """Accumulates complexity of gap2d into cx = (h, w, flops, params, acts).""" flops, params, acts = cx['flops'], cx['params'], cx['acts'] return {'h': 1, 'w': 1, 'flops': flops, 'params': params, 'acts': acts} def linear(w_in, w_out, *, bias=False): """Helper for building a linear layer.""" return nn.Linear(w_in, w_out, bias=bias) def linear_cx(cx, w_in, w_out, *, bias=False, num_locations=1): """Accumulates complexity of linear into cx = (h, w, flops, params, acts).""" h, w, flops, params, acts = cx['h'], cx['w'], cx['flops'], cx['params' ], cx['acts'] flops += w_in * w_out * num_locations + (w_out * num_locations if bias else 0) params += w_in * w_out + (w_out if bias else 0) acts += w_out * num_locations return {'h': h, 'w': w, 'flops': flops, 'params': params, 'acts': acts} class ResHead(Module): """ResNet head: AvgPool, 1x1.""" def __init__(self, w_in, num_classes): super(ResHead, self).__init__() self.avg_pool = gap2d(w_in) self.fc = linear(w_in, num_classes, bias=True) def forward(self, x): x = self.avg_pool(x) x = x.view(x.size(0), -1) x = self.fc(x) return x @staticmethod def complexity(cx, w_in, num_classes): cx = gap2d_cx(cx, w_in) cx = linear_cx(cx, w_in, num_classes, bias=True) return cx def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'w_in': 4, 'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha =1, beta=1, out=buf2) del primals_2 del primals_3 return buf2, reinterpret_tensor(buf1, (4, 4), (4, 1), 0) def gap2d(_w_in): """Helper for building a gap2d layer.""" return nn.AdaptiveAvgPool2d((1, 1)) def gap2d_cx(cx, _w_in): """Accumulates complexity of gap2d into cx = (h, w, flops, params, acts).""" flops, params, acts = cx['flops'], cx['params'], cx['acts'] return {'h': 1, 'w': 1, 'flops': flops, 'params': params, 'acts': acts} def linear(w_in, w_out, *, bias=False): """Helper for building a linear layer.""" return nn.Linear(w_in, w_out, bias=bias) def linear_cx(cx, w_in, w_out, *, bias=False, num_locations=1): """Accumulates complexity of linear into cx = (h, w, flops, params, acts).""" h, w, flops, params, acts = cx['h'], cx['w'], cx['flops'], cx['params' ], cx['acts'] flops += w_in * w_out * num_locations + (w_out * num_locations if bias else 0) params += w_in * w_out + (w_out if bias else 0) acts += w_out * num_locations return {'h': h, 'w': w, 'flops': flops, 'params': params, 'acts': acts} class ResHeadNew(Module): """ResNet head: AvgPool, 1x1.""" def __init__(self, w_in, num_classes): super(ResHeadNew, self).__init__() self.avg_pool = gap2d(w_in) self.fc = linear(w_in, num_classes, bias=True) @staticmethod def complexity(cx, w_in, num_classes): cx = gap2d_cx(cx, w_in) cx = linear_cx(cx, w_in, num_classes, bias=True) return cx def forward(self, input_0): primals_2 = self.fc.weight primals_3 = self.fc.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
MAC-AutoML/XCompression
ResHead
false
5558
[ "MIT" ]
1
9f76eb3ccfb3057110ecf12aa48dec00a4667a25
https://github.com/MAC-AutoML/XCompression/tree/9f76eb3ccfb3057110ecf12aa48dec00a4667a25
from torch.nn import Module import torch import torch.nn as nn import torch.utils.data def gap2d(_w_in): """Helper for building a gap2d layer.""" return nn.AdaptiveAvgPool2d((1, 1)) def gap2d_cx(cx, _w_in): """Accumulates complexity of gap2d into cx = (h, w, flops, params, acts).""" flops, params, acts = cx['flops'], cx['params'], cx['acts'] return {'h': 1, 'w': 1, 'flops': flops, 'params': params, 'acts': acts} def linear(w_in, w_out, *, bias=False): """Helper for building a linear layer.""" return nn.Linear(w_in, w_out, bias=bias) def linear_cx(cx, w_in, w_out, *, bias=False, num_locations=1): """Accumulates complexity of linear into cx = (h, w, flops, params, acts).""" h, w, flops, params, acts = cx['h'], cx['w'], cx['flops'], cx['params' ], cx['acts'] flops += w_in * w_out * num_locations + (w_out * num_locations if bias else 0) params += w_in * w_out + (w_out if bias else 0) acts += w_out * num_locations return {'h': h, 'w': w, 'flops': flops, 'params': params, 'acts': acts} class Model(Module): """ResNet head: AvgPool, 1x1.""" def __init__(self, w_in, num_classes): super().__init__() self.avg_pool = gap2d(w_in) self.fc = linear(w_in, num_classes, bias=True) def forward(self, x): x = self.avg_pool(x) x = x.view(x.size(0), -1) x = self.fc(x) return x @staticmethod def complexity(cx, w_in, num_classes): cx = gap2d_cx(cx, w_in) cx = linear_cx(cx, w_in, num_classes, bias=True) return cx def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
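A short worked example (an illustration, not a dataset field) of the complexity bookkeeping in this record, assuming the python_code field above has been executed so that ResHead is defined.

cx = {'h': 4, 'w': 4, 'flops': 0, 'params': 0, 'acts': 0}
cx = ResHead.complexity(cx, w_in=4, num_classes=4)
# gap2d_cx collapses h and w to 1; linear_cx with bias=True then adds
#   flops  = 4 * 4 + 4 = 20   (weight multiply-accumulates plus bias adds)
#   params = 4 * 4 + 4 = 20   (weight matrix plus bias vector)
#   acts   = 4                (one activation per output class)
print(cx)  # {'h': 1, 'w': 1, 'flops': 20, 'params': 20, 'acts': 4}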
MaxPool
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ud/cude6zl4nio2ly5l3l5cwlmxkoqtt4qkekbvrzk6nz7rpwc6ypf3.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => getitem # Graph fragment: # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 3) % 3 x0 = xindex % 3 x2 = (xindex // 9) x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = 
tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=float("-inf")) tmp12 = x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + ((-4) + x0 + (4*x1) + (16*x2)), tmp16 & xmask, other=float("-inf")) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + ((-3) + x0 + (4*x1) + (16*x2)), tmp23 & xmask, other=float("-inf")) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = 2 + x0 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp5 & tmp29 tmp31 = tl.load(in_ptr0 + ((-2) + x0 + (4*x1) + (16*x2)), tmp30 & xmask, other=float("-inf")) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = x1 tmp34 = tmp33 >= tmp1 tmp35 = tmp33 < tmp3 tmp36 = tmp34 & tmp35 tmp37 = tmp36 & tmp9 tmp38 = tl.load(in_ptr0 + ((-1) + x0 + (4*x1) + (16*x2)), tmp37 & xmask, other=float("-inf")) tmp39 = triton_helpers.maximum(tmp38, tmp32) tmp40 = tmp36 & tmp15 tmp41 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp40 & xmask, other=float("-inf")) tmp42 = triton_helpers.maximum(tmp41, tmp39) tmp43 = tmp36 & tmp22 tmp44 = tl.load(in_ptr0 + (1 + x0 + (4*x1) + (16*x2)), tmp43 & xmask, other=float("-inf")) tmp45 = triton_helpers.maximum(tmp44, tmp42) tmp46 = tmp36 & tmp29 tmp47 = tl.load(in_ptr0 + (2 + x0 + (4*x1) + (16*x2)), tmp46 & xmask, other=float("-inf")) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = 1 + x1 tmp50 = tmp49 >= tmp1 tmp51 = tmp49 < tmp3 tmp52 = tmp50 & tmp51 tmp53 = tmp52 & tmp9 tmp54 = tl.load(in_ptr0 + (3 + x0 + (4*x1) + (16*x2)), tmp53 & xmask, other=float("-inf")) tmp55 = triton_helpers.maximum(tmp54, tmp48) tmp56 = tmp52 & tmp15 tmp57 = tl.load(in_ptr0 + (4 + x0 + (4*x1) + (16*x2)), tmp56 & xmask, other=float("-inf")) tmp58 = triton_helpers.maximum(tmp57, tmp55) tmp59 = tmp52 & tmp22 tmp60 = tl.load(in_ptr0 + (5 + x0 + (4*x1) + (16*x2)), tmp59 & xmask, other=float("-inf")) tmp61 = triton_helpers.maximum(tmp60, tmp58) tmp62 = tmp52 & tmp29 tmp63 = tl.load(in_ptr0 + (6 + x0 + (4*x1) + (16*x2)), tmp62 & xmask, other=float("-inf")) tmp64 = triton_helpers.maximum(tmp63, tmp61) tmp65 = 2 + x1 tmp66 = tmp65 >= tmp1 tmp67 = tmp65 < tmp3 tmp68 = tmp66 & tmp67 tmp69 = tmp68 & tmp9 tmp70 = tl.load(in_ptr0 + (7 + x0 + (4*x1) + (16*x2)), tmp69 & xmask, other=float("-inf")) tmp71 = triton_helpers.maximum(tmp70, tmp64) tmp72 = tmp68 & tmp15 tmp73 = tl.load(in_ptr0 + (8 + x0 + (4*x1) + (16*x2)), tmp72 & xmask, other=float("-inf")) tmp74 = triton_helpers.maximum(tmp73, tmp71) tmp75 = tmp68 & tmp22 tmp76 = tl.load(in_ptr0 + (9 + x0 + (4*x1) + (16*x2)), tmp75 & xmask, other=float("-inf")) tmp77 = triton_helpers.maximum(tmp76, tmp74) tmp78 = tmp68 & tmp29 tmp79 = tl.load(in_ptr0 + (10 + x0 + (4*x1) + (16*x2)), tmp78 & xmask, other=float("-inf")) tmp80 = triton_helpers.maximum(tmp79, tmp77) tl.store(out_ptr0 + (x4), tmp80, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] stream0 = get_raw_stream(0) 
triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 144, grid=grid(144), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.optim import torch.utils.data class MaxPool(nn.Module): def __init__(self, kernel_size, stride=1, padding=1, zero_pad=False): super(MaxPool, self).__init__() self.zero_pad = nn.ZeroPad2d((1, 0, 1, 0)) if zero_pad else None self.pool = nn.MaxPool2d(kernel_size, stride=stride, padding=padding) def forward(self, x): if self.zero_pad: x = self.zero_pad(x) x = self.pool(x) if self.zero_pad: x = x[:, :, 1:, 1:] return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 3 % 3 x0 = xindex % 3 x2 = xindex // 9 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=float('-inf')) tmp12 = x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1 + 16 * x2), tmp16 & xmask, other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (-3 + x0 + 4 * x1 + 16 * x2), tmp23 & xmask, other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = 2 + x0 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp5 & tmp29 tmp31 = tl.load(in_ptr0 + (-2 + x0 + 4 * x1 + 16 * x2), tmp30 & xmask, other=float('-inf')) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = x1 tmp34 = tmp33 >= tmp1 tmp35 = tmp33 < tmp3 tmp36 = tmp34 & tmp35 tmp37 = tmp36 & tmp9 tmp38 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp37 & xmask, other=float('-inf')) tmp39 = triton_helpers.maximum(tmp38, tmp32) tmp40 = tmp36 & tmp15 tmp41 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp40 & xmask, other =float('-inf')) tmp42 = triton_helpers.maximum(tmp41, tmp39) tmp43 = tmp36 & tmp22 tmp44 = tl.load(in_ptr0 + (1 + x0 + 4 * x1 + 16 * x2), tmp43 & xmask, other=float('-inf')) tmp45 = triton_helpers.maximum(tmp44, tmp42) tmp46 = tmp36 & tmp29 tmp47 = tl.load(in_ptr0 + (2 + x0 + 4 * x1 + 16 * x2), tmp46 & xmask, other=float('-inf')) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = 1 + x1 tmp50 = tmp49 >= tmp1 tmp51 = tmp49 < tmp3 tmp52 = tmp50 & tmp51 tmp53 = tmp52 & tmp9 tmp54 = tl.load(in_ptr0 + (3 + x0 + 4 * x1 + 16 * x2), tmp53 & xmask, other=float('-inf')) tmp55 = triton_helpers.maximum(tmp54, tmp48) tmp56 = tmp52 & tmp15 tmp57 = tl.load(in_ptr0 + (4 + x0 + 4 * x1 + 16 * x2), tmp56 & xmask, other=float('-inf')) tmp58 = triton_helpers.maximum(tmp57, tmp55) tmp59 = tmp52 & tmp22 tmp60 = tl.load(in_ptr0 + (5 + x0 + 4 * x1 + 16 * x2), tmp59 & xmask, other=float('-inf')) tmp61 = triton_helpers.maximum(tmp60, tmp58) tmp62 = tmp52 & tmp29 tmp63 = tl.load(in_ptr0 + (6 + x0 + 4 * x1 + 16 * x2), tmp62 & xmask, other=float('-inf')) tmp64 = triton_helpers.maximum(tmp63, tmp61) tmp65 = 2 + x1 tmp66 = tmp65 >= tmp1 tmp67 = tmp65 < tmp3 tmp68 = tmp66 & tmp67 tmp69 = tmp68 & tmp9 tmp70 = tl.load(in_ptr0 + (7 + x0 + 4 * x1 + 16 * x2), tmp69 & xmask, other=float('-inf')) tmp71 = triton_helpers.maximum(tmp70, tmp64) tmp72 = tmp68 & tmp15 tmp73 = tl.load(in_ptr0 + (8 + x0 + 4 * x1 + 16 * x2), tmp72 & xmask, other=float('-inf')) tmp74 = triton_helpers.maximum(tmp73, 
tmp71) tmp75 = tmp68 & tmp22 tmp76 = tl.load(in_ptr0 + (9 + x0 + 4 * x1 + 16 * x2), tmp75 & xmask, other=float('-inf')) tmp77 = triton_helpers.maximum(tmp76, tmp74) tmp78 = tmp68 & tmp29 tmp79 = tl.load(in_ptr0 + (10 + x0 + 4 * x1 + 16 * x2), tmp78 & xmask, other=float('-inf')) tmp80 = triton_helpers.maximum(tmp79, tmp77) tl.store(out_ptr0 + x4, tmp80, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(144)](arg0_1, buf0, 144, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class MaxPoolNew(nn.Module): def __init__(self, kernel_size, stride=1, padding=1, zero_pad=False): super(MaxPoolNew, self).__init__() self.zero_pad = nn.ZeroPad2d((1, 0, 1, 0)) if zero_pad else None self.pool = nn.MaxPool2d(kernel_size, stride=stride, padding=padding) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
LongKt7/Face_Recognize_Pytorch
MaxPool
false
5559
[ "MIT" ]
1
baa02e633d379abe1001c8b8acb942617177329c
https://github.com/LongKt7/Face_Recognize_Pytorch/tree/baa02e633d379abe1001c8b8acb942617177329c
import torch import torch.nn as nn import torch.optim import torch.utils.data class Model(nn.Module): def __init__(self, kernel_size, stride=1, padding=1, zero_pad=False): super().__init__() self.zero_pad = nn.ZeroPad2d((1, 0, 1, 0)) if zero_pad else None self.pool = nn.MaxPool2d(kernel_size, stride=stride, padding=padding) def forward(self, x): if self.zero_pad: x = self.zero_pad(x) x = self.pool(x) if self.zero_pad: x = x[:, :, 1:, 1:] return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
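A quick shape check (an illustration, not a dataset field) for the sizes hard-coded in this record's kernel: MaxPool2d with kernel_size=4, stride=1, padding=1 maps each 4x4 spatial grid to 3x3, which is where the (4, 4, 3, 3) output buffer and xnumel = 144 come from.

import torch
import torch.nn as nn

pool = nn.MaxPool2d(4, stride=1, padding=1)
y = pool(torch.rand(4, 4, 4, 4))
print(y.shape)    # torch.Size([4, 4, 3, 3]); per dim: (4 + 2*1 - 4) // 1 + 1 = 3
print(y.numel())  # 144 == 4 * 4 * 3 * 3, matching xnumel in the kernel above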
UsBlock_nounpool
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/rl/crlzvualg2za225vkja5fjpnxss76chgi2glfm2hpmsgp2hm7zo3.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%squeeze_5, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = 
(xindex // 64) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf2, 256, grid=grid(256), stream=stream0) del primals_2 return (buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn def conv3x3(in_channels, out_channels, stride=1, padding=1, bias=True): return nn.Conv3d(in_channels, out_channels, kernel_size=3, stride= stride, padding=padding, bias=bias) class UsBlock_nounpool(nn.Module): def __init__(self, in_channels, out_channels, up_mode='transpose'): super(UsBlock_nounpool, self).__init__() self.conv = conv3x3(in_channels, out_channels) self.relu = nn.ReLU(inplace=True) def forward(self, x): x = self.conv(x) x = self.relu(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf2 def conv3x3(in_channels, out_channels, stride=1, padding=1, bias=True): return nn.Conv3d(in_channels, out_channels, kernel_size=3, stride= stride, padding=padding, bias=bias) class UsBlock_nounpoolNew(nn.Module): def __init__(self, in_channels, out_channels, up_mode='transpose'): super(UsBlock_nounpoolNew, self).__init__() self.conv = conv3x3(in_channels, out_channels) self.relu = nn.ReLU(inplace=True) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
MATHplus-Young-Academy/P2-Cardiac-Motion
UsBlock_nounpool
false
5560
[ "Apache-2.0" ]
1
844995e8e5760f981c425d13c0bd7f2f3bb8baec
https://github.com/MATHplus-Young-Academy/P2-Cardiac-Motion/tree/844995e8e5760f981c425d13c0bd7f2f3bb8baec
import torch import torch.nn as nn def conv3x3(in_channels, out_channels, stride=1, padding=1, bias=True): return nn.Conv3d(in_channels, out_channels, kernel_size=3, stride= stride, padding=padding, bias=bias) class Model(nn.Module): def __init__(self, in_channels, out_channels, up_mode='transpose'): super().__init__() self.conv = conv3x3(in_channels, out_channels) self.relu = nn.ReLU(inplace=True) def forward(self, x): x = self.conv(x) x = self.relu(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
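An illustrative note on the reinterpret_tensor call in this record (not a dataset field): the eager module feeds a 4D tensor to Conv3d, which recent PyTorch versions accept as an unbatched (C, D, H, W) input, so the compiled graph simply makes the implicit batch dimension of 1 explicit. A minimal check of that equivalence, assuming a PyTorch build with unbatched-conv support:

import torch
import torch.nn as nn

conv = nn.Conv3d(4, 4, kernel_size=3, padding=1)
x = torch.rand(4, 4, 4, 4)                      # unbatched (C, D, H, W), as in get_inputs()
y_unbatched = conv(x)                           # Conv3d applied directly to the 4D input
y_batched = conv(x.unsqueeze(0)).squeeze(0)     # the (1, 4, 4, 4, 4) view taken by call()
assert torch.allclose(y_unbatched, y_batched)
print(y_unbatched.shape)                        # torch.Size([4, 4, 4, 4])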
WDV29LayerNormalization
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/hp/chpdwpegv6lvistek2wqgimtufecqvfp6grp5rpblk5yjicjzqd2.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add_2, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_7, [3]), kwargs = {correction: 0, keepdim: True}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : 
tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/el/celxf6tvonm4hnojqioclt5acflsj3tnnwh4e7fon3fxf7kiua46.py # Topologically Sorted Source Nodes: [tanh, mul, weight, tanh_1, mul_1, bias, layer_norm], Original ATen: [aten.tanh, aten.mul, aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # bias => add_1 # layer_norm => add_2, add_3, mul_2, mul_3, rsqrt, sub, var_mean # mul => mul # mul_1 => mul_1 # tanh => tanh # tanh_1 => tanh_1 # weight => add # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%primals_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %tanh), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_4), kwargs = {}) # %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%primals_2,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, %tanh_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_6), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_7, [3]), kwargs = {correction: 0, keepdim: True}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_7, %getitem_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %add), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %add_1), kwargs = {}) triton_poi_fused_add_mul_native_layer_norm_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 
'*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_tanh_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr7 + (x0), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr8 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp7 = libdevice.tanh(tmp6) tmp8 = tmp5 * tmp7 tmp10 = tmp8 + tmp9 tmp11 = tmp4 * tmp10 tmp14 = libdevice.tanh(tmp13) tmp15 = tmp12 * tmp14 tmp17 = tmp15 + tmp16 tmp18 = tmp11 + tmp17 tl.store(out_ptr0 + (x2), tmp18, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_7, buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tanh, mul, weight, tanh_1, mul_1, bias, layer_norm], Original ATen: [aten.tanh, aten.mul, aten.add, aten.native_layer_norm] triton_poi_fused_add_mul_native_layer_norm_tanh_1.run(primals_7, buf0, buf1, primals_3, primals_1, primals_4, primals_5, primals_2, primals_6, 
buf2, 256, grid=grid(256), stream=stream0) del buf0 del buf1 del primals_3 del primals_4 del primals_5 del primals_6 return (buf2, primals_1, primals_2, primals_7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
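For reference, a minimal eager-mode sketch (my own names, not from the generated wrapper) of what triton_poi_fused_native_layer_norm_0 above computes: the per-row mean over the last dimension of size 4, and rsqrt(var + 1e-05) with variance taken at correction=0, exactly as in the graph fragment.

import torch

# Hypothetical sanity check for the statistics kernel: 64 rows of 4
# contiguous floats for a (4, 4, 4, 4) input, matching xnumel = 64.
x = torch.randn(4, 4, 4, 4)
mean = x.mean(dim=-1, keepdim=True)                 # out_ptr0 in the kernel
var = x.var(dim=-1, unbiased=False, keepdim=True)   # correction: 0
rstd = torch.rsqrt(var + 1e-05)                     # out_ptr1 in the kernel
assert mean.numel() == 64 and rstd.numel() == 64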
import numbers import torch import torch.nn.functional as F import torch.utils.data from torch.nn import Parameter import torch.onnx.operators from torch.nn.parameter import Parameter from torch.nn import init import torch.optim import torch.optim.lr_scheduler class WDV29LayerNormalization(torch.nn.Module): """Applies Layer Normalization over a mini-batch of inputs as described in the paper `Layer Normalization`_ . .. math:: y = \\frac{x - \\mathrm{E}[x]}{ \\sqrt{\\mathrm{Var}[x] + \\epsilon}} * \\gamma + \\beta The mean and standard-deviation are calculated separately over the last certain number dimensions which have to be of the shape specified by :attr:`normalized_shape`. :math:`\\gamma` and :math:`\\beta` are learnable affine transform parameters of :attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``. .. note:: Unlike Batch Normalization and Instance Normalization, which applies scalar scale and bias for each entire channel/plane with the :attr:`affine` option, Layer Normalization applies per-element scale and bias with :attr:`elementwise_affine`. This layer uses statistics computed from input data in both training and evaluation modes. Args: normalized_shape (int or list or torch.Size): input shape from an expected input of size .. math:: [* \\times \\text{normalized\\_shape}[0] \\times \\text{normalized\\_shape}[1] \\times \\ldots \\times \\text{normalized\\_shape}[-1]] If a single integer is used, it is treated as a singleton list, and this module will normalize over the last dimension which is expected to be of that specific size. eps: a value added to the denominator for numerical stability. Default: 1e-5 elementwise_affine: a boolean value that when set to ``True``, this module has learnable per-element affine parameters initialized to ones (for weights) and zeros (for biases). Default: ``True``. Shape: - Input: :math:`(N, *)` - Output: :math:`(N, *)` (same shape as input) Examples:: >>> input = torch.randn(20, 5, 10, 10) >>> # With Learnable Parameters >>> m = nn.LayerNorm(input.size()[1:]) >>> # Without Learnable Parameters >>> m = nn.LayerNorm(input.size()[1:], elementwise_affine=False) >>> # Normalize over last two dimensions >>> m = nn.LayerNorm([10, 10]) >>> # Normalize over last dimension of size 10 >>> m = nn.LayerNorm(10) >>> # Activating the module >>> output = m(input) .. 
_`Layer Normalization`: https://arxiv.org/abs/1607.06450 """ __constants__ = ['normalized_shape', 'weight', 'bias', 'eps', 'elementwise_affine'] def __init__(self, normalized_shape, wd_require_gradient=False, eps= 1e-05, elementwise_affine=True): super(WDV29LayerNormalization, self).__init__() if isinstance(normalized_shape, numbers.Integral): normalized_shape = normalized_shape, self.normalized_shape = tuple(normalized_shape) self.eps = eps self.elementwise_affine = elementwise_affine if self.elementwise_affine: self.weight = Parameter(torch.Tensor(*normalized_shape), requires_grad=wd_require_gradient) self.tanh_weight_weight = Parameter(torch.Tensor(*normalized_shape) ) self.tanh_weight_bias = Parameter(torch.Tensor(*normalized_shape)) self.bias = Parameter(torch.Tensor(*normalized_shape), requires_grad=wd_require_gradient) self.tanh_bias_weight = Parameter(torch.Tensor(*normalized_shape)) self.tanh_bias_bias = Parameter(torch.Tensor(*normalized_shape)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): if self.elementwise_affine: torch.nn.init.constant_(self.tanh_weight_weight, 1.0) torch.nn.init.constant_(self.tanh_weight_bias, 1.0) torch.nn.init.constant_(self.tanh_bias_weight, 0.0) torch.nn.init.constant_(self.tanh_bias_bias, 0.0) init.ones_(self.weight) init.zeros_(self.bias) def forward(self, input): weight = self.weight bias = self.bias weight = self.tanh_weight_weight * torch.tanh(weight ) + self.tanh_bias_weight bias = self.tanh_weight_bias * torch.tanh(bias) + self.tanh_bias_bias return F.layer_norm(input, self.normalized_shape, weight, bias, self.eps) def extra_repr(self): return ( '{normalized_shape}, eps={eps}, elementwise_affine={elementwise_affine}' .format(**self.__dict__)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'normalized_shape': 4}]
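A small usage sketch, not part of the repository: right after reset_parameters() the reparameterization gives an effective gain of 1 * tanh(1) + 0 = tanh(1) and an effective shift of 1 * tanh(0) + 0 = 0, so the freshly initialized module should agree with a plain layer norm scaled by tanh(1).

import torch
import torch.nn.functional as F

m = WDV29LayerNormalization(4)
x = torch.rand(4, 4, 4, 4)
# gamma_eff = tanh_weight_weight * tanh(weight) + tanh_bias_weight = tanh(1)
# beta_eff  = tanh_weight_bias  * tanh(bias)   + tanh_bias_bias    = 0
ref = F.layer_norm(x, (4,)) * torch.tanh(torch.ones(()))
assert torch.allclose(m(x), ref, atol=1e-6)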
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import numbers import torch.utils.data from torch.nn import Parameter import torch.onnx.operators from torch.nn.parameter import Parameter from torch.nn import init import torch.optim import torch.optim.lr_scheduler assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_tanh_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr7 + x0, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr8 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp7 = libdevice.tanh(tmp6) tmp8 = tmp5 * tmp7 tmp10 = tmp8 + tmp9 tmp11 = tmp4 * tmp10 tmp14 = libdevice.tanh(tmp13) tmp15 = tmp12 * tmp14 tmp17 = tmp15 + tmp16 tmp18 = tmp11 + tmp17 tl.store(out_ptr0 + x2, tmp18, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(64)](primals_7, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = 
empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_native_layer_norm_tanh_1[grid(256)](primals_7, buf0, buf1, primals_3, primals_1, primals_4, primals_5, primals_2, primals_6, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del buf1 del primals_3 del primals_4 del primals_5 del primals_6 return buf2, primals_1, primals_2, primals_7 class WDV29LayerNormalizationNew(torch.nn.Module): """Applies Layer Normalization over a mini-batch of inputs as described in the paper `Layer Normalization`_ . .. math:: y = \\frac{x - \\mathrm{E}[x]}{ \\sqrt{\\mathrm{Var}[x] + \\epsilon}} * \\gamma + \\beta The mean and standard-deviation are calculated separately over the last certain number dimensions which have to be of the shape specified by :attr:`normalized_shape`. :math:`\\gamma` and :math:`\\beta` are learnable affine transform parameters of :attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``. .. note:: Unlike Batch Normalization and Instance Normalization, which applies scalar scale and bias for each entire channel/plane with the :attr:`affine` option, Layer Normalization applies per-element scale and bias with :attr:`elementwise_affine`. This layer uses statistics computed from input data in both training and evaluation modes. Args: normalized_shape (int or list or torch.Size): input shape from an expected input of size .. math:: [* \\times \\text{normalized\\_shape}[0] \\times \\text{normalized\\_shape}[1] \\times \\ldots \\times \\text{normalized\\_shape}[-1]] If a single integer is used, it is treated as a singleton list, and this module will normalize over the last dimension which is expected to be of that specific size. eps: a value added to the denominator for numerical stability. Default: 1e-5 elementwise_affine: a boolean value that when set to ``True``, this module has learnable per-element affine parameters initialized to ones (for weights) and zeros (for biases). Default: ``True``. Shape: - Input: :math:`(N, *)` - Output: :math:`(N, *)` (same shape as input) Examples:: >>> input = torch.randn(20, 5, 10, 10) >>> # With Learnable Parameters >>> m = nn.LayerNorm(input.size()[1:]) >>> # Without Learnable Parameters >>> m = nn.LayerNorm(input.size()[1:], elementwise_affine=False) >>> # Normalize over last two dimensions >>> m = nn.LayerNorm([10, 10]) >>> # Normalize over last dimension of size 10 >>> m = nn.LayerNorm(10) >>> # Activating the module >>> output = m(input) .. 
_`Layer Normalization`: https://arxiv.org/abs/1607.06450 """ __constants__ = ['normalized_shape', 'weight', 'bias', 'eps', 'elementwise_affine'] def __init__(self, normalized_shape, wd_require_gradient=False, eps= 1e-05, elementwise_affine=True): super(WDV29LayerNormalizationNew, self).__init__() if isinstance(normalized_shape, numbers.Integral): normalized_shape = normalized_shape, self.normalized_shape = tuple(normalized_shape) self.eps = eps self.elementwise_affine = elementwise_affine if self.elementwise_affine: self.weight = Parameter(torch.Tensor(*normalized_shape), requires_grad=wd_require_gradient) self.tanh_weight_weight = Parameter(torch.Tensor(*normalized_shape) ) self.tanh_weight_bias = Parameter(torch.Tensor(*normalized_shape)) self.bias = Parameter(torch.Tensor(*normalized_shape), requires_grad=wd_require_gradient) self.tanh_bias_weight = Parameter(torch.Tensor(*normalized_shape)) self.tanh_bias_bias = Parameter(torch.Tensor(*normalized_shape)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): if self.elementwise_affine: torch.nn.init.constant_(self.tanh_weight_weight, 1.0) torch.nn.init.constant_(self.tanh_weight_bias, 1.0) torch.nn.init.constant_(self.tanh_bias_weight, 0.0) torch.nn.init.constant_(self.tanh_bias_bias, 0.0) init.ones_(self.weight) init.zeros_(self.bias) def extra_repr(self): return ( '{normalized_shape}, eps={eps}, elementwise_affine={elementwise_affine}' .format(**self.__dict__)) def forward(self, input_0): primals_1 = self.weight primals_2 = self.tanh_weight_weight primals_3 = self.tanh_weight_bias primals_4 = self.bias primals_5 = self.tanh_bias_weight primals_6 = self.tanh_bias_bias primals_7 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
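A hedged parity check between the eager and compiled modules; it assumes a CUDA device with a working Triton toolchain and that both classes above live in the same importable module.

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    eager = WDV29LayerNormalization(4).cuda()
    compiled = WDV29LayerNormalizationNew(4).cuda()
    compiled.load_state_dict(eager.state_dict())  # share all six parameters
    x = torch.rand(4, 4, 4, 4, device='cuda')
    # The fused kernels should reproduce the eager F.layer_norm path.
    assert torch.allclose(eager(x), compiled(x), atol=1e-5)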
Lollipop321/weight-distillation
WDV29LayerNormalization
false
5,561
[ "BSD-3-Clause" ]
1
cfc76ec58e3e88094dde1825287b2968f9718431
https://github.com/Lollipop321/weight-distillation/tree/cfc76ec58e3e88094dde1825287b2968f9718431
import numbers import torch import torch.nn.functional as F import torch.utils.data from torch.nn import Parameter import torch.onnx.operators from torch.nn.parameter import Parameter from torch.nn import init import torch.optim import torch.optim.lr_scheduler class Model(torch.nn.Module): """Applies Layer Normalization over a mini-batch of inputs as described in the paper `Layer Normalization`_ . .. math:: y = \\frac{x - \\mathrm{E}[x]}{ \\sqrt{\\mathrm{Var}[x] + \\epsilon}} * \\gamma + \\beta The mean and standard-deviation are calculated separately over the last certain number dimensions which have to be of the shape specified by :attr:`normalized_shape`. :math:`\\gamma` and :math:`\\beta` are learnable affine transform parameters of :attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``. .. note:: Unlike Batch Normalization and Instance Normalization, which applies scalar scale and bias for each entire channel/plane with the :attr:`affine` option, Layer Normalization applies per-element scale and bias with :attr:`elementwise_affine`. This layer uses statistics computed from input data in both training and evaluation modes. Args: normalized_shape (int or list or torch.Size): input shape from an expected input of size .. math:: [* \\times \\text{normalized\\_shape}[0] \\times \\text{normalized\\_shape}[1] \\times \\ldots \\times \\text{normalized\\_shape}[-1]] If a single integer is used, it is treated as a singleton list, and this module will normalize over the last dimension which is expected to be of that specific size. eps: a value added to the denominator for numerical stability. Default: 1e-5 elementwise_affine: a boolean value that when set to ``True``, this module has learnable per-element affine parameters initialized to ones (for weights) and zeros (for biases). Default: ``True``. Shape: - Input: :math:`(N, *)` - Output: :math:`(N, *)` (same shape as input) Examples:: >>> input = torch.randn(20, 5, 10, 10) >>> # With Learnable Parameters >>> m = nn.LayerNorm(input.size()[1:]) >>> # Without Learnable Parameters >>> m = nn.LayerNorm(input.size()[1:], elementwise_affine=False) >>> # Normalize over last two dimensions >>> m = nn.LayerNorm([10, 10]) >>> # Normalize over last dimension of size 10 >>> m = nn.LayerNorm(10) >>> # Activating the module >>> output = m(input) .. _`Layer Normalization`: https://arxiv.org/abs/1607.06450 """ __constants__ = ['normalized_shape', 'weight', 'bias', 'eps', 'elementwise_affine'] def __init__(self, normalized_shape, wd_require_gradient=False, eps= 1e-05, elementwise_affine=True): super().__init__() if isinstance(normalized_shape, numbers.Integral): normalized_shape = normalized_shape, self.normalized_shape = tuple(normalized_shape) self.eps = eps self.elementwise_affine = elementwise_affine if self.elementwise_affine: self.weight = Parameter(torch.Tensor(*normalized_shape), requires_grad=wd_require_gradient) self.tanh_weight_weight = Parameter(torch.Tensor(*normalized_shape) ) self.tanh_weight_bias = Parameter(torch.Tensor(*normalized_shape)) self.bias = Parameter(torch.Tensor(*normalized_shape), requires_grad=wd_require_gradient) self.tanh_bias_weight = Parameter(torch.Tensor(*normalized_shape)) self.tanh_bias_bias = Parameter(torch.Tensor(*normalized_shape)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) self.reset_parameters() def reset_parame # ... truncated (>4000 chars) for memory efficiency
WDV29Linear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/rb/crbx6wdzdbdjlck75irsth4iy6smxxjgxz3q6l4of2njw7sqx3ug.py # Topologically Sorted Source Nodes: [tanh, mul, weight], Original ATen: [aten.tanh, aten.mul, aten.add] # Source node to ATen node mapping: # mul => mul # tanh => tanh # weight => add # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%primals_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %tanh), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {}) triton_poi_fused_add_mul_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_tanh_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 
16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask) tmp2 = libdevice.tanh(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/j2/cj2txmrvo6fsowrygo4hnb7eq7omgzwo3kyzcghwvhkhqu5qxr4y.py # Topologically Sorted Source Nodes: [tanh_1, mul_1, bias], Original ATen: [aten.tanh, aten.mul, aten.add] # Source node to ATen node mapping: # bias => add_1 # mul_1 => mul_1 # tanh_1 => tanh_1 # Graph fragment: # %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%primals_4,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, %tanh_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_6), kwargs = {}) triton_poi_fused_add_mul_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask) tmp2 = libdevice.tanh(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [tanh, mul, weight], Original ATen: [aten.tanh, aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_tanh_0.run(primals_2, primals_1, primals_3, buf0, 16, grid=grid(16), stream=stream0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [tanh_1, mul_1, bias], Original ATen: [aten.tanh, aten.mul, aten.add] triton_poi_fused_add_mul_tanh_1.run(primals_5, primals_4, primals_6, buf1, 4, grid=grid(4), stream=stream0) del primals_5 del primals_6 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [tanh_1, mul_1, bias, linear], Original ATen: [aten.tanh, aten.mul, aten.add, aten.addmm] extern_kernels.addmm(buf1, reinterpret_tensor(primals_7, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, primals_4, reinterpret_tensor(primals_7, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
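The extern_kernels.addmm call in the wrapper above is the usual out = beta*bias + alpha*(mat1 @ mat2) GEMM; a plain-PyTorch sketch of the same flatten-then-addmm pattern (tensor names are mine, not from the generated code):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)      # primals_7
w_eff = torch.rand(4, 4)        # buf0: the reparameterized weight, already fused
b_eff = torch.rand(4)           # buf1: the reparameterized bias
x2d = x.reshape(64, 4)          # reinterpret_tensor(primals_7, (64, 4), ...)
y = torch.addmm(b_eff, x2d, w_eff.t())  # alpha=1, beta=1, as in the call
assert torch.allclose(y.reshape(4, 4, 4, 4), F.linear(x, w_eff, b_eff))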
import math import torch import torch.nn.functional as F import torch.utils.data from torch.nn import Parameter import torch.onnx.operators from torch.nn.parameter import Parameter from torch.nn import init import torch.optim import torch.optim.lr_scheduler class WDV29Linear(torch.nn.Module): """Applies a linear transformation to the incoming data: :math:`y = xA^T + b` Args: in_features: size of each input sample out_features: size of each output sample bias: If set to ``False``, the layer will not learn an additive bias. Default: ``True`` Shape: - Input: :math:`(N, *, H_{in})` where :math:`*` means any number of additional dimensions and :math:`H_{in} = \\text{in\\_features}` - Output: :math:`(N, *, H_{out})` where all but the last dimension are the same shape as the input and :math:`H_{out} = \\text{out\\_features}`. Attributes: weight: the learnable weights of the module of shape :math:`(\\text{out\\_features}, \\text{in\\_features})`. The values are initialized from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})`, where :math:`k = \\frac{1}{\\text{in\\_features}}` bias: the learnable bias of the module of shape :math:`(\\text{out\\_features})`. If :attr:`bias` is ``True``, the values are initialized from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where :math:`k = \\frac{1}{\\text{in\\_features}}` Examples:: >>> m = nn.Linear(20, 30) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) torch.Size([128, 30]) """ __constants__ = ['bias', 'in_features', 'out_features'] def __init__(self, in_features, out_features, wd_require_gradient=False, bias=True): super(WDV29Linear, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.Tensor(out_features, in_features), requires_grad=wd_require_gradient) self.tanh_weight_weight = Parameter(torch.Tensor(out_features, in_features)) self.tanh_bias_weight = Parameter(torch.Tensor(out_features, in_features)) if bias: self.bias = Parameter(torch.Tensor(out_features), requires_grad =wd_require_gradient) self.tanh_weight_bias = Parameter(torch.Tensor(out_features)) self.tanh_bias_bias = Parameter(torch.Tensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): torch.nn.init.constant_(self.tanh_weight_weight, 1.0) torch.nn.init.constant_(self.tanh_weight_bias, 1.0) init.kaiming_uniform_(self.weight, a=math.sqrt(5)) if self.bias is not None: torch.nn.init.constant_(self.tanh_bias_weight, 0.0) torch.nn.init.constant_(self.tanh_bias_bias, 0.0) fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight) bound = 1 / math.sqrt(fan_in) init.uniform_(self.bias, -bound, bound) def forward(self, input): weight = self.weight weight = self.tanh_weight_weight * torch.tanh(weight ) + self.tanh_bias_weight bias = self.bias bias = self.tanh_weight_bias * torch.tanh(bias) + self.tanh_bias_bias return F.linear(input, weight, bias) def extra_repr(self): return 'in_features={}, out_features={}, bias={}'.format(self. in_features, self.out_features, self.bias is not None) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
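An illustrative gradient check (not from the source): with the default wd_require_gradient=False the base weight and bias are frozen, and only the four tanh_* reparameterization tensors receive gradients.

import torch

layer = WDV29Linear(4, 4)       # wd_require_gradient defaults to False
layer(torch.rand(4, 4, 4, 4)).sum().backward()
# Frozen base parameters never accumulate .grad.
assert layer.weight.grad is None and layer.bias.grad is None
assert layer.tanh_weight_weight.grad is not None
assert layer.tanh_bias_weight.grad is not None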
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch.utils.data from torch.nn import Parameter import torch.onnx.operators from torch.nn.parameter import Parameter from torch.nn import init import torch.optim import torch.optim.lr_scheduler assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_tanh_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp2 = libdevice.tanh(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_add_mul_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp2 = libdevice.tanh(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_tanh_0[grid(16)](primals_2, primals_1, primals_3, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 del primals_3 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_add_mul_tanh_1[grid(4)](primals_5, primals_4, primals_6, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_5 del primals_6 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, reinterpret_tensor(primals_7, (64, 4), ( 4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, primals_4, reinterpret_tensor(primals_7, (64, 4), (4, 1), 0) class WDV29LinearNew(torch.nn.Module): """Applies a linear transformation to the incoming data: :math:`y = xA^T + b` Args: in_features: size of each input sample out_features: size of each output sample bias: If set to ``False``, the layer will not learn an additive bias. Default: ``True`` Shape: - Input: :math:`(N, *, H_{in})` where :math:`*` means any number of additional dimensions and :math:`H_{in} = \\text{in\\_features}` - Output: :math:`(N, *, H_{out})` where all but the last dimension are the same shape as the input and :math:`H_{out} = \\text{out\\_features}`. 
Attributes: weight: the learnable weights of the module of shape :math:`(\\text{out\\_features}, \\text{in\\_features})`. The values are initialized from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})`, where :math:`k = \\frac{1}{\\text{in\\_features}}` bias: the learnable bias of the module of shape :math:`(\\text{out\\_features})`. If :attr:`bias` is ``True``, the values are initialized from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where :math:`k = \\frac{1}{\\text{in\\_features}}` Examples:: >>> m = nn.Linear(20, 30) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) torch.Size([128, 30]) """ __constants__ = ['bias', 'in_features', 'out_features'] def __init__(self, in_features, out_features, wd_require_gradient=False, bias=True): super(WDV29LinearNew, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.Tensor(out_features, in_features), requires_grad=wd_require_gradient) self.tanh_weight_weight = Parameter(torch.Tensor(out_features, in_features)) self.tanh_bias_weight = Parameter(torch.Tensor(out_features, in_features)) if bias: self.bias = Parameter(torch.Tensor(out_features), requires_grad =wd_require_gradient) self.tanh_weight_bias = Parameter(torch.Tensor(out_features)) self.tanh_bias_bias = Parameter(torch.Tensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): torch.nn.init.constant_(self.tanh_weight_weight, 1.0) torch.nn.init.constant_(self.tanh_weight_bias, 1.0) init.kaiming_uniform_(self.weight, a=math.sqrt(5)) if self.bias is not None: torch.nn.init.constant_(self.tanh_bias_weight, 0.0) torch.nn.init.constant_(self.tanh_bias_bias, 0.0) fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight) bound = 1 / math.sqrt(fan_in) init.uniform_(self.bias, -bound, bound) def extra_repr(self): return 'in_features={}, out_features={}, bias={}'.format(self. in_features, self.out_features, self.bias is not None) def forward(self, input_0): primals_1 = self.weight primals_2 = self.tanh_weight_weight primals_3 = self.tanh_bias_weight primals_4 = self.bias primals_5 = self.tanh_weight_bias primals_6 = self.tanh_bias_bias primals_7 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Lollipop321/weight-distillation
WDV29Linear
false
5,562
[ "BSD-3-Clause" ]
1
cfc76ec58e3e88094dde1825287b2968f9718431
https://github.com/Lollipop321/weight-distillation/tree/cfc76ec58e3e88094dde1825287b2968f9718431
import math import torch import torch.nn.functional as F import torch.utils.data from torch.nn import Parameter import torch.onnx.operators from torch.nn.parameter import Parameter from torch.nn import init import torch.optim import torch.optim.lr_scheduler class Model(torch.nn.Module): """Applies a linear transformation to the incoming data: :math:`y = xA^T + b` Args: in_features: size of each input sample out_features: size of each output sample bias: If set to ``False``, the layer will not learn an additive bias. Default: ``True`` Shape: - Input: :math:`(N, *, H_{in})` where :math:`*` means any number of additional dimensions and :math:`H_{in} = \\text{in\\_features}` - Output: :math:`(N, *, H_{out})` where all but the last dimension are the same shape as the input and :math:`H_{out} = \\text{out\\_features}`. Attributes: weight: the learnable weights of the module of shape :math:`(\\text{out\\_features}, \\text{in\\_features})`. The values are initialized from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})`, where :math:`k = \\frac{1}{\\text{in\\_features}}` bias: the learnable bias of the module of shape :math:`(\\text{out\\_features})`. If :attr:`bias` is ``True``, the values are initialized from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where :math:`k = \\frac{1}{\\text{in\\_features}}` Examples:: >>> m = nn.Linear(20, 30) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) torch.Size([128, 30]) """ __constants__ = ['bias', 'in_features', 'out_features'] def __init__(self, in_features, out_features, wd_require_gradient=False, bias=True): super().__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.Tensor(out_features, in_features), requires_grad=wd_require_gradient) self.tanh_weight_weight = Parameter(torch.Tensor(out_features, in_features)) self.tanh_bias_weight = Parameter(torch.Tensor(out_features, in_features)) if bias: self.bias = Parameter(torch.Tensor(out_features), requires_grad =wd_require_gradient) self.tanh_weight_bias = Parameter(torch.Tensor(out_features)) self.tanh_bias_bias = Parameter(torch.Tensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): torch.nn.init.constant_(self.tanh_weight_weight, 1.0) torch.nn.init.constant_(self.tanh_weight_bias, 1.0) init.kaiming_uniform_(self.weight, a=math.sqrt(5)) if self.bias is not None: torch.nn.init.constant_(self.tanh_bias_weight, 0.0) torch.nn.init.constant_(self.tanh_bias_bias, 0.0) fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight) bound = 1 / math.sqrt(fan_in) init.uniform_(self.bias, -bound, bound) def forward(self, input): weight = self.weight weight = self.tanh_weight_weight * torch.tanh(weight ) + self.tanh_bias_weight bias = self.bias bias = self.tanh_weight_bias * torch.tanh(bias) + self.tanh_bias_bias return F.linear(input, weight, bias) def extra_repr(self): return 'in_features={}, out_features={}, bias={}'.format(self. in_features, self.out_features, self.bias is not None) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
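A hypothetical harness, mirroring how get_inputs/get_init_inputs are presumably consumed downstream; note that this record returns positional init arguments ([4, 4]) rather than the keyword dict used by the layer-norm records.

import torch

model = Model(*get_init_inputs())   # here: Model(4, 4)
(x,) = get_inputs()
assert model(x).shape == torch.Size([4, 4, 4, 4])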
WDV52LayerNormalization
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/hp/chpdwpegv6lvistek2wqgimtufecqvfp6grp5rpblk5yjicjzqd2.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add_2, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_13, [3]), kwargs = {correction: 0, keepdim: True}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : 
tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/el/celxf6tvonm4hnojqioclt5acflsj3tnnwh4e7fon3fxf7kiua46.py # Topologically Sorted Source Nodes: [tanh, mul, weight_wd_6, tanh_1, mul_1, bias_wd_6, layer_norm], Original ATen: [aten.tanh, aten.mul, aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # bias_wd_6 => add_1 # layer_norm => add_2, add_3, mul_2, mul_3, rsqrt, sub, var_mean # mul => mul # mul_1 => mul_1 # tanh => tanh # tanh_1 => tanh_1 # weight_wd_6 => add # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%squeeze,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, %tanh), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_6), kwargs = {}) # %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%squeeze_1,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_11, %tanh_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_12), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_13, [3]), kwargs = {correction: 0, keepdim: True}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_13, %getitem_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %add), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %add_1), kwargs = {}) triton_poi_fused_add_mul_native_layer_norm_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, 
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_tanh_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr7 + (x0), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr8 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp7 = libdevice.tanh(tmp6) tmp8 = tmp5 * tmp7 tmp10 = tmp8 + tmp9 tmp11 = tmp4 * tmp10 tmp14 = libdevice.tanh(tmp13) tmp15 = tmp12 * tmp14 tmp17 = tmp15 + tmp16 tmp18 = tmp11 + tmp17 tl.store(out_ptr0 + (x2), tmp18, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, 1), (1, 1)) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [weight_wd_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: 
[weight_wd_3], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (4, 4), (1, 4), 0), primals_3, out=buf1) buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [weight_wd_4], Original ATen: [aten.mm] extern_kernels.mm(buf1, primals_4, out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [bias_wd_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), primals_8, out=buf3) del primals_8 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [bias_wd_3], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf3, (4, 4), (1, 4), 0), primals_9, out=buf4) buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [bias_wd_4], Original ATen: [aten.mm] extern_kernels.mm(buf4, primals_10, out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_13, buf6, buf7, 64, grid=grid(64), stream=stream0) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tanh, mul, weight_wd_6, tanh_1, mul_1, bias_wd_6, layer_norm], Original ATen: [aten.tanh, aten.mul, aten.add, aten.native_layer_norm] triton_poi_fused_add_mul_native_layer_norm_tanh_1.run(primals_13, buf6, buf7, primals_5, buf2, primals_6, primals_11, buf5, primals_12, buf8, 256, grid=grid(256), stream=stream0) del buf6 del buf7 del primals_12 del primals_6 return (buf8, primals_3, primals_5, primals_9, primals_11, primals_13, buf2, buf5, reinterpret_tensor(buf4, (4, 4), (1, 4), 0), reinterpret_tensor(primals_10, (1, 4), (1, 1), 0), buf3, primals_7, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), reinterpret_tensor(primals_4, (1, 4), (1, 1), 0), buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark 
import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import numbers import torch import torch.nn.functional as F import torch.utils.data from torch.nn import Parameter import torch.onnx.operators from torch.nn.parameter import Parameter from torch.nn import init import torch.optim import torch.optim.lr_scheduler class WDV52LayerNormalization(torch.nn.Module): """Applies Layer Normalization over a mini-batch of inputs as described in the paper `Layer Normalization`_ . .. math:: y = \\frac{x - \\mathrm{E}[x]}{ \\sqrt{\\mathrm{Var}[x] + \\epsilon}} * \\gamma + \\beta The mean and standard-deviation are calculated separately over the last certain number dimensions which have to be of the shape specified by :attr:`normalized_shape`. :math:`\\gamma` and :math:`\\beta` are learnable affine transform parameters of :attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``. .. note:: Unlike Batch Normalization and Instance Normalization, which applies scalar scale and bias for each entire channel/plane with the :attr:`affine` option, Layer Normalization applies per-element scale and bias with :attr:`elementwise_affine`. This layer uses statistics computed from input data in both training and evaluation modes. Args: normalized_shape (int or list or torch.Size): input shape from an expected input of size .. math:: [* \\times \\text{normalized\\_shape}[0] \\times \\text{normalized\\_shape}[1] \\times \\ldots \\times \\text{normalized\\_shape}[-1]] If a single integer is used, it is treated as a singleton list, and this module will normalize over the last dimension which is expected to be of that specific size. eps: a value added to the denominator for numerical stability. Default: 1e-5 elementwise_affine: a boolean value that when set to ``True``, this module has learnable per-element affine parameters initialized to ones (for weights) and zeros (for biases). Default: ``True``. Shape: - Input: :math:`(N, *)` - Output: :math:`(N, *)` (same shape as input) Examples:: >>> input = torch.randn(20, 5, 10, 10) >>> # With Learnable Parameters >>> m = nn.LayerNorm(input.size()[1:]) >>> # Without Learnable Parameters >>> m = nn.LayerNorm(input.size()[1:], elementwise_affine=False) >>> # Normalize over last two dimensions >>> m = nn.LayerNorm([10, 10]) >>> # Normalize over last dimension of size 10 >>> m = nn.LayerNorm(10) >>> # Activating the module >>> output = m(input) .. 
_`Layer Normalization`: https://arxiv.org/abs/1607.06450 """ __constants__ = ['normalized_shape', 'weight', 'bias', 'eps', 'elementwise_affine'] def __init__(self, ts, normalized_shape, wd_normalized_shape, wd_decoder_layers, wd_require_gradient=False, eps=1e-05, elementwise_affine=True): super(WDV52LayerNormalization, self).__init__() if isinstance(normalized_shape, numbers.Integral): normalized_shape = normalized_shape, self.normalized_shape = tuple(normalized_shape) self.ts = ts self.eps = eps self.elementwise_affine = elementwise_affine self.wd_decoder_layers = wd_decoder_layers if self.elementwise_affine: self.out_wd_weight = Parameter(torch.Tensor(wd_normalized_shape, *normalized_shape)) self.ly_wd_weight = Parameter(torch.Tensor(ts, ts)) self.wd_weight = Parameter(torch.Tensor(ts, 1)) self.weight = Parameter(torch.Tensor(wd_normalized_shape, ts), requires_grad=wd_require_gradient) self.tanh_weight_weight = Parameter(torch.Tensor(*normalized_shape) ) self.tanh_weight_bias = Parameter(torch.Tensor(*normalized_shape)) self.out_wd_bias = Parameter(torch.Tensor(wd_normalized_shape, *normalized_shape)) self.ly_wd_bias = Parameter(torch.Tensor(ts, ts)) self.wd_bias = Parameter(torch.Tensor(ts, 1)) self.bias = Parameter(torch.Tensor(wd_normalized_shape, ts), requires_grad=wd_require_gradient) self.tanh_bias_weight = Parameter(torch.Tensor(*normalized_shape)) self.tanh_bias_bias = Parameter(torch.Tensor(*normalized_shape)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): if self.elementwise_affine: torch.nn.init.xavier_normal_(self.out_wd_weight) torch.nn.init.xavier_normal_(self.ly_wd_weight) torch.nn.init.xavier_normal_(self.out_wd_bias) torch.nn.init.xavier_normal_(self.ly_wd_bias) torch.nn.init.constant_(self.wd_weight, 1 / self.ts) torch.nn.init.constant_(self.wd_bias, 1 / self.ts) torch.nn.init.constant_(self.tanh_weight_weight, 1.0) torch.nn.init.constant_(self.tanh_weight_bias, 1.0) torch.nn.init.constant_(self.tanh_bias_weight, 0.0) torch.nn.init.constant_(self.tanh_bias_bias, 0.0) init.ones_(self.weight) init.zeros_(self.bias) def forward(self, input): weight = self.weight weight_wd = torch.transpose(weight, 0, 1) weight_wd = torch.matmul(weight_wd, self.out_wd_weight) weight_wd = torch.transpose(weight_wd, 0, 1) weight_wd = torch.matmul(weight_wd, self.ly_wd_weight) weight_wd = torch.matmul(weight_wd, self.wd_weight) weight_wd = weight_wd.squeeze(-1) weight_wd = self.tanh_weight_weight * torch.tanh(weight_wd ) + self.tanh_bias_weight bias = self.bias bias_wd = torch.transpose(bias, 0, 1) bias_wd = torch.matmul(bias_wd, self.out_wd_bias) bias_wd = torch.transpose(bias_wd, 0, 1) bias_wd = torch.matmul(bias_wd, self.ly_wd_bias) bias_wd = torch.matmul(bias_wd, self.wd_bias) bias_wd = bias_wd.squeeze(-1) bias_wd = self.tanh_weight_bias * torch.tanh(bias_wd ) + self.tanh_bias_bias return F.layer_norm(input, self.normalized_shape, weight_wd, bias_wd, self.eps) def extra_repr(self): return ( '{normalized_shape}, eps={eps}, elementwise_affine={elementwise_affine}' .format(**self.__dict__)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'ts': 4, 'normalized_shape': 4, 'wd_normalized_shape': 4, 'wd_decoder_layers': 1}]
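A minimal smoke test for the module above, using the toy configuration from get_init_inputs() and the tensor shape from get_inputs(); this is only a shape sketch, not a trained setting:

import torch

ln = WDV52LayerNormalization(ts=4, normalized_shape=4,
    wd_normalized_shape=4, wd_decoder_layers=1)
x = torch.rand(4, 4, 4, 4)
y = ln(x)
assert y.shape == x.shape  # layer norm preserves the input shape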
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import numbers import torch.utils.data from torch.nn import Parameter import torch.onnx.operators from torch.nn.parameter import Parameter from torch.nn import init import torch.optim import torch.optim.lr_scheduler assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_add_mul_native_layer_norm_tanh_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr7 + x0, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr8 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp7 = libdevice.tanh(tmp6) tmp8 = tmp5 * tmp7 tmp10 = tmp8 + tmp9 tmp11 = tmp4 * tmp10 tmp14 = libdevice.tanh(tmp13) tmp15 = tmp12 * tmp14 tmp17 = tmp15 + tmp16 tmp18 = tmp11 + tmp17 tl.store(out_ptr0 + x2, tmp18, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, 1), (1, 1)) 
assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (4, 4), (1, 4), 0), primals_3, out=buf1) buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf1, primals_4, out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), primals_8, out=buf3) del primals_8 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (4, 4), (1, 4), 0), primals_9, out=buf4) buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf4, primals_10, out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(64)](primals_13, buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_native_layer_norm_tanh_1[grid(256)](primals_13 , buf6, buf7, primals_5, buf2, primals_6, primals_11, buf5, primals_12, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf6 del buf7 del primals_12 del primals_6 return (buf8, primals_3, primals_5, primals_9, primals_11, primals_13, buf2, buf5, reinterpret_tensor(buf4, (4, 4), (1, 4), 0), reinterpret_tensor(primals_10, (1, 4), (1, 1), 0), buf3, primals_7, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), reinterpret_tensor( primals_4, (1, 4), (1, 1), 0), buf0, primals_1) class WDV52LayerNormalizationNew(torch.nn.Module): """Applies Layer Normalization over a mini-batch of inputs as described in the paper `Layer Normalization`_ . .. math:: y = \\frac{x - \\mathrm{E}[x]}{ \\sqrt{\\mathrm{Var}[x] + \\epsilon}} * \\gamma + \\beta The mean and standard-deviation are calculated separately over the last certain number dimensions which have to be of the shape specified by :attr:`normalized_shape`. :math:`\\gamma` and :math:`\\beta` are learnable affine transform parameters of :attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``. .. note:: Unlike Batch Normalization and Instance Normalization, which applies scalar scale and bias for each entire channel/plane with the :attr:`affine` option, Layer Normalization applies per-element scale and bias with :attr:`elementwise_affine`. This layer uses statistics computed from input data in both training and evaluation modes. Args: normalized_shape (int or list or torch.Size): input shape from an expected input of size .. math:: [* \\times \\text{normalized\\_shape}[0] \\times \\text{normalized\\_shape}[1] \\times \\ldots \\times \\text{normalized\\_shape}[-1]] If a single integer is used, it is treated as a singleton list, and this module will normalize over the last dimension which is expected to be of that specific size. eps: a value added to the denominator for numerical stability. Default: 1e-5 elementwise_affine: a boolean value that when set to ``True``, this module has learnable per-element affine parameters initialized to ones (for weights) and zeros (for biases). Default: ``True``. 
Shape: - Input: :math:`(N, *)` - Output: :math:`(N, *)` (same shape as input) Examples:: >>> input = torch.randn(20, 5, 10, 10) >>> # With Learnable Parameters >>> m = nn.LayerNorm(input.size()[1:]) >>> # Without Learnable Parameters >>> m = nn.LayerNorm(input.size()[1:], elementwise_affine=False) >>> # Normalize over last two dimensions >>> m = nn.LayerNorm([10, 10]) >>> # Normalize over last dimension of size 10 >>> m = nn.LayerNorm(10) >>> # Activating the module >>> output = m(input) .. _`Layer Normalization`: https://arxiv.org/abs/1607.06450 """ __constants__ = ['normalized_shape', 'weight', 'bias', 'eps', 'elementwise_affine'] def __init__(self, ts, normalized_shape, wd_normalized_shape, wd_decoder_layers, wd_require_gradient=False, eps=1e-05, elementwise_affine=True): super(WDV52LayerNormalizationNew, self).__init__() if isinstance(normalized_shape, numbers.Integral): normalized_shape = normalized_shape, self.normalized_shape = tuple(normalized_shape) self.ts = ts self.eps = eps self.elementwise_affine = elementwise_affine self.wd_decoder_layers = wd_decoder_layers if self.elementwise_affine: self.out_wd_weight = Parameter(torch.Tensor(wd_normalized_shape, *normalized_shape)) self.ly_wd_weight = Parameter(torch.Tensor(ts, ts)) self.wd_weight = Parameter(torch.Tensor(ts, 1)) self.weight = Parameter(torch.Tensor(wd_normalized_shape, ts), requires_grad=wd_require_gradient) self.tanh_weight_weight = Parameter(torch.Tensor(*normalized_shape) ) self.tanh_weight_bias = Parameter(torch.Tensor(*normalized_shape)) self.out_wd_bias = Parameter(torch.Tensor(wd_normalized_shape, *normalized_shape)) self.ly_wd_bias = Parameter(torch.Tensor(ts, ts)) self.wd_bias = Parameter(torch.Tensor(ts, 1)) self.bias = Parameter(torch.Tensor(wd_normalized_shape, ts), requires_grad=wd_require_gradient) self.tanh_bias_weight = Parameter(torch.Tensor(*normalized_shape)) self.tanh_bias_bias = Parameter(torch.Tensor(*normalized_shape)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): if self.elementwise_affine: torch.nn.init.xavier_normal_(self.out_wd_weight) torch.nn.init.xavier_normal_(self.ly_wd_weight) torch.nn.init.xavier_normal_(self.out_wd_bias) torch.nn.init.xavier_normal_(self.ly_wd_bias) torch.nn.init.constant_(self.wd_weight, 1 / self.ts) torch.nn.init.constant_(self.wd_bias, 1 / self.ts) torch.nn.init.constant_(self.tanh_weight_weight, 1.0) torch.nn.init.constant_(self.tanh_weight_bias, 1.0) torch.nn.init.constant_(self.tanh_bias_weight, 0.0) torch.nn.init.constant_(self.tanh_bias_bias, 0.0) init.ones_(self.weight) init.zeros_(self.bias) def extra_repr(self): return ( '{normalized_shape}, eps={eps}, elementwise_affine={elementwise_affine}' .format(**self.__dict__)) def forward(self, input_0): primals_1 = self.out_wd_weight primals_2 = self.ly_wd_weight primals_4 = self.wd_weight primals_3 = self.weight primals_5 = self.tanh_weight_weight primals_6 = self.tanh_weight_bias primals_7 = self.out_wd_bias primals_8 = self.ly_wd_bias primals_10 = self.wd_bias primals_9 = self.bias primals_11 = self.tanh_bias_weight primals_12 = self.tanh_bias_bias primals_13 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
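One would expect the Inductor-generated WDV52LayerNormalizationNew to agree numerically with the eager module; a hedged sketch of that check (assumes a CUDA device, since call() launches Triton kernels):

import torch

ref = WDV52LayerNormalization(4, 4, 4, 1).cuda()
fused = WDV52LayerNormalizationNew(4, 4, 4, 1).cuda()
fused.load_state_dict(ref.state_dict())  # both classes define the same parameter names
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(fused(x), ref(x), rtol=1e-4, atol=1e-5)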
Lollipop321/weight-distillation
WDV52LayerNormalization
false
5,563
[ "BSD-3-Clause" ]
1
cfc76ec58e3e88094dde1825287b2968f9718431
https://github.com/Lollipop321/weight-distillation/tree/cfc76ec58e3e88094dde1825287b2968f9718431
import numbers import torch import torch.nn.functional as F import torch.utils.data from torch.nn import Parameter import torch.onnx.operators from torch.nn.parameter import Parameter from torch.nn import init import torch.optim import torch.optim.lr_scheduler class Model(torch.nn.Module): """Applies Layer Normalization over a mini-batch of inputs as described in the paper `Layer Normalization`_ . .. math:: y = \\frac{x - \\mathrm{E}[x]}{ \\sqrt{\\mathrm{Var}[x] + \\epsilon}} * \\gamma + \\beta The mean and standard-deviation are calculated separately over the last certain number dimensions which have to be of the shape specified by :attr:`normalized_shape`. :math:`\\gamma` and :math:`\\beta` are learnable affine transform parameters of :attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``. .. note:: Unlike Batch Normalization and Instance Normalization, which applies scalar scale and bias for each entire channel/plane with the :attr:`affine` option, Layer Normalization applies per-element scale and bias with :attr:`elementwise_affine`. This layer uses statistics computed from input data in both training and evaluation modes. Args: normalized_shape (int or list or torch.Size): input shape from an expected input of size .. math:: [* \\times \\text{normalized\\_shape}[0] \\times \\text{normalized\\_shape}[1] \\times \\ldots \\times \\text{normalized\\_shape}[-1]] If a single integer is used, it is treated as a singleton list, and this module will normalize over the last dimension which is expected to be of that specific size. eps: a value added to the denominator for numerical stability. Default: 1e-5 elementwise_affine: a boolean value that when set to ``True``, this module has learnable per-element affine parameters initialized to ones (for weights) and zeros (for biases). Default: ``True``. Shape: - Input: :math:`(N, *)` - Output: :math:`(N, *)` (same shape as input) Examples:: >>> input = torch.randn(20, 5, 10, 10) >>> # With Learnable Parameters >>> m = nn.LayerNorm(input.size()[1:]) >>> # Without Learnable Parameters >>> m = nn.LayerNorm(input.size()[1:], elementwise_affine=False) >>> # Normalize over last two dimensions >>> m = nn.LayerNorm([10, 10]) >>> # Normalize over last dimension of size 10 >>> m = nn.LayerNorm(10) >>> # Activating the module >>> output = m(input) .. _`Layer Normalization`: https://arxiv.org/abs/1607.06450 """ __constants__ = ['normalized_shape', 'weight', 'bias', 'eps', 'elementwise_affine'] def __init__(self, ts, normalized_shape, wd_normalized_shape, wd_decoder_layers, wd_require_gradient=False, eps=1e-05, elementwise_affine=True): super().__init__() if isinstance(normalized_shape, numbers.Integral): normalized_shape = normalized_shape, self.normalized_shape = tuple(normalized_shape) self.ts = ts self.eps = eps self.elementwise_affine = elementwise_affine self.wd_decoder_layers = wd_decoder_layers if self.elementwise_affine: self.out_wd_weight = Parameter(torch.Tensor(wd_normalized_shape, *normalized_shape)) self.ly_wd_weight = Parameter(torch.Tensor(ts, ts)) self.wd_weight = Parameter(torch.Tensor(ts, 1)) self.weight = Parameter(torch.Tensor(wd_normalized_shape, ts), requires_grad=wd_require_gradient) self.tanh_weight_weight = Parameter(torch.Tensor(*normalized_shape) ) self.tanh_weight_bias = Parameter(torch.Tensor(*normalized_shape)) self.out_wd_bias = Parameter(torch.Tensor(wd_normalized_shape, # ... truncated (>4000 chars) for memory efficiency
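The field above is truncated by the exporter; judging from the full WDV52LayerNormalization source earlier in this record, Model should behave identically apart from the class name, so a hypothetical call would look like:

import torch

# Hypothetical usage, assuming Model completes exactly like the
# WDV52LayerNormalization class above (the field is truncated).
m = Model(ts=4, normalized_shape=4, wd_normalized_shape=4,
    wd_decoder_layers=1)
out = m(torch.rand(4, 4, 4, 4))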
UsBlockRes
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/7y/c7y5ocwgrsytsbdmcmeke6s3x47svfjto6zudqpi4u7tnys7gcnh.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.add] # Source node to ATen node mapping: # x_1 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze, %primals_4), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x1 = (xindex // 512) tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), None) tmp2 = tmp0 + tmp1 tmp4 = 
tmp2 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/4e/c4eof5cmaf74zvhv7uwyqomvkalvly4gkan7imrv54mxkkvb64lk.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_3 => relu # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%squeeze_6, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x1 = (xindex // 512) tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 2, 2, 2), (32, 8, 4, 2, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 8, 8, 8), (512, 64, 8, 1)) assert_size_stride(primals_5, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(2, 2, 2), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=True, 
output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 8, 8, 8), (2048, 512, 64, 8, 1)) buf1 = reinterpret_tensor(buf0, (4, 8, 8, 8), (512, 64, 8, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(buf1, primals_2, primals_4, 2048, grid=grid(2048), stream=stream0) del primals_2 del primals_4 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 8, 8, 8), (0, 512, 64, 8, 1), 0), primals_5, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf2, (1, 4, 8, 8, 8), (2048, 512, 64, 8, 1)) buf3 = reinterpret_tensor(buf2, (4, 8, 8, 8), (512, 64, 8, 1), 0); del buf2 # reuse buf4 = empty_strided_cuda((4, 8, 8, 8), (512, 64, 8, 1), torch.bool) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_6, buf4, 2048, grid=grid(2048), stream=stream0) del primals_6 return (buf3, primals_1, primals_5, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), reinterpret_tensor(buf1, (1, 4, 8, 8, 8), (2048, 512, 64, 8, 1), 0), buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 2, 2, 2), (32, 8, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 8, 8, 8), (512, 64, 8, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
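The two fused kernels above are pointwise epilogues around the extern convolutions: triton_poi_fused_add_0 folds the transposed-conv bias and the skip connection into one pass (x1 = index // 512 selects the channel of the (4, 8, 8, 8) buffer), and triton_poi_fused_relu_threshold_backward_1 adds the second conv's bias, applies ReLU in place, and stores the <= 0 mask consumed later by threshold_backward. An eager-mode reference for the same arithmetic, with hypothetical function names:

import torch

def fused_add_reference(conv_out, bias, skip):
    # conv_out, skip: (4, 8, 8, 8); bias: (4,), broadcast per channel
    return conv_out + bias.view(-1, 1, 1, 1) + skip

def fused_relu_reference(conv_out, bias):
    pre = conv_out + bias.view(-1, 1, 1, 1)
    out = torch.relu(pre)
    mask = out <= 0  # boolean buffer saved for the backward pass
    return out, mask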
import torch import torch.nn as nn def conv3x3(in_channels, out_channels, stride=1, padding=1, bias=True): return nn.Conv3d(in_channels, out_channels, kernel_size=3, stride= stride, padding=padding, bias=bias) def conv1x1(in_channels, out_channels): return nn.Conv3d(in_channels, out_channels, kernel_size=1) def upconv2x2(in_channels, out_channels, mode='transpose'): if mode == 'transpose': return nn.ConvTranspose3d(in_channels, out_channels, kernel_size=2, stride=2) else: return nn.Sequential(nn.Upsample(mode='trilinear', scale_factor=2), conv1x1(in_channels, out_channels)) class UsBlockRes(nn.Module): def __init__(self, in_channels, out_channels, up_mode='transpose'): super(UsBlockRes, self).__init__() self.upconv = upconv2x2(in_channels, out_channels, mode=up_mode) self.conv = conv3x3(out_channels, out_channels) self.relu = nn.ReLU(inplace=True) def forward(self, before_pool, x): x = self.upconv(x) x = x + before_pool x = self.conv(x) x = self.relu(x) return x def get_inputs(): return [torch.rand([4, 8, 8, 8]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
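A minimal shape check for UsBlockRes with the tensors from get_inputs(): the stride-2 transposed convolution doubles the 4^3 volume to 8^3 so it can be added to the skip connection (recent PyTorch accepts these unbatched 4-D inputs to Conv3d):

import torch

block = UsBlockRes(in_channels=4, out_channels=4)
before_pool = torch.rand(4, 8, 8, 8)  # encoder skip connection
x = torch.rand(4, 4, 4, 4)            # low-resolution features
out = block(before_pool, x)
assert out.shape == (4, 8, 8, 8)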
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x1 = xindex // 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x1 = xindex // 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 2, 2, 2), (32, 8, 4, 2, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 8, 8, 8), (512, 64, 8, 1)) assert_size_stride(primals_5, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(2, 2, 2), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=True, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 8, 8, 8), (2048, 512, 64, 8, 1)) buf1 = reinterpret_tensor(buf0, (4, 8, 8, 8), (512, 64, 8, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(2048)](buf1, primals_2, primals_4, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_4 buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 8, 8, 8), (0, 512, 64, 8, 1), 0), primals_5, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf2, (1, 4, 8, 8, 8), (2048, 512, 64, 8, 1)) buf3 = reinterpret_tensor(buf2, (4, 8, 8, 8), (512, 64, 8, 1), 0) del buf2 buf4 = empty_strided_cuda((4, 8, 8, 8), (512, 64, 8, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(2048)](buf3, primals_6, buf4, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_6 return buf3, primals_1, primals_5, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), reinterpret_tensor(buf1, (1, 4, 8, 8, 8), (2048, 512, 64, 8, 1), 0), buf4 def conv3x3(in_channels, out_channels, stride=1, padding=1, bias=True): return nn.Conv3d(in_channels, out_channels, kernel_size=3, stride= stride, padding=padding, bias=bias) def conv1x1(in_channels, out_channels): return 
nn.Conv3d(in_channels, out_channels, kernel_size=1) def upconv2x2(in_channels, out_channels, mode='transpose'): if mode == 'transpose': return nn.ConvTranspose3d(in_channels, out_channels, kernel_size=2, stride=2) else: return nn.Sequential(nn.Upsample(mode='trilinear', scale_factor=2), conv1x1(in_channels, out_channels)) class UsBlockResNew(nn.Module): def __init__(self, in_channels, out_channels, up_mode='transpose'): super(UsBlockResNew, self).__init__() self.upconv = upconv2x2(in_channels, out_channels, mode=up_mode) self.conv = conv3x3(out_channels, out_channels) self.relu = nn.ReLU(inplace=True) def forward(self, input_0, input_1): primals_1 = self.upconv.weight primals_2 = self.upconv.bias primals_5 = self.conv.weight primals_6 = self.conv.bias primals_4 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
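As with the layer-norm record, a hedged equivalence check against the eager block (CUDA only, since call() launches the Triton kernels):

import torch

eager = UsBlockRes(4, 4).cuda()
fused = UsBlockResNew(4, 4).cuda()
fused.load_state_dict(eager.state_dict())
before_pool = torch.rand(4, 8, 8, 8, device='cuda')
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(fused(before_pool, x), eager(before_pool, x),
    rtol=1e-4, atol=1e-5)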
MATHplus-Young-Academy/P2-Cardiac-Motion
UsBlockRes
false
5,564
[ "Apache-2.0" ]
1
844995e8e5760f981c425d13c0bd7f2f3bb8baec
https://github.com/MATHplus-Young-Academy/P2-Cardiac-Motion/tree/844995e8e5760f981c425d13c0bd7f2f3bb8baec
import torch import torch.nn as nn def conv3x3(in_channels, out_channels, stride=1, padding=1, bias=True): return nn.Conv3d(in_channels, out_channels, kernel_size=3, stride= stride, padding=padding, bias=bias) def conv1x1(in_channels, out_channels): return nn.Conv3d(in_channels, out_channels, kernel_size=1) def upconv2x2(in_channels, out_channels, mode='transpose'): if mode == 'transpose': return nn.ConvTranspose3d(in_channels, out_channels, kernel_size=2, stride=2) else: return nn.Sequential(nn.Upsample(mode='trilinear', scale_factor=2), conv1x1(in_channels, out_channels)) class Model(nn.Module): def __init__(self, in_channels, out_channels, up_mode='transpose'): super().__init__() self.upconv = upconv2x2(in_channels, out_channels, mode=up_mode) self.conv = conv3x3(out_channels, out_channels) self.relu = nn.ReLU(inplace=True) def forward(self, before_pool, x): x = self.upconv(x) x = x + before_pool x = self.conv(x) x = self.relu(x) return x def get_inputs(): return [torch.rand([4, 8, 8, 8]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
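Note that this variant's get_init_inputs() returns positional arguments ([4, 4]) rather than the [[], {...}] convention used by the other records; a usage sketch:

import torch

m = Model(*[4, 4])  # in_channels=4, out_channels=4
out = m(torch.rand(4, 8, 8, 8), torch.rand(4, 4, 4, 4))
assert out.shape == (4, 8, 8, 8)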
ContinousRotReprDecoder
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/hd/chd4rxbi63w5iepye5w4zqxwpml3db4bngunfmfplycvf2qoeigi.py # Topologically Sorted Source Nodes: [b1, mul, dot_prod], Original ATen: [aten.div, aten.mul, aten.sum] # Source node to ATen node mapping: # b1 => div # dot_prod => sum_2 # mul => mul # Graph fragment: # %div : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%select, %expand), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %select_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1], True), kwargs = {}) triton_poi_fused_div_mul_sum_0 = async_compile.triton('triton_poi_fused_div_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (6*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (2 + (6*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (4 + (6*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (1 + (6*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (3 + (6*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (5 + (6*x0)), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp8 = libdevice.sqrt(tmp7) tmp9 = 1e-12 tmp10 = triton_helpers.maximum(tmp8, tmp9) tmp11 = tmp0 / tmp10 tmp13 = tmp11 * tmp12 tmp14 = tmp2 / tmp10 tmp16 = tmp14 * tmp15 tmp17 = tmp13 + tmp16 tmp18 = tmp5 / tmp10 tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tl.store(out_ptr0 + (x0), tmp21, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/or/corli2hhegxx5f3pgn3genmxh3u3xzhbqybdatbsgwql66rinl3a.py # Topologically Sorted Source Nodes: [b1, mul, dot_prod, mul_1, sub], Original ATen: [aten.div, aten.mul, aten.sum, aten.sub] # Source node to ATen node mapping: # b1 => div # dot_prod => sum_2 # mul => mul # mul_1 => mul_1 # sub => sub # Graph fragment: # %div : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%select, %expand), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %select_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1], True), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, %div), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_2, %mul_1), kwargs = {}) triton_poi_fused_div_mul_sub_sum_1 = async_compile.triton('triton_poi_fused_div_mul_sub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 3) tmp0 = tl.load(in_ptr0 + (1 + (2*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (2*x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (6*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + (6*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (4 + (6*x1)), xmask, eviction_policy='evict_last') tmp4 = tmp3 * tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tmp12 = 1e-12 tmp13 = triton_helpers.maximum(tmp11, tmp12) tmp14 = tmp2 / tmp13 tmp15 = tmp1 * tmp14 tmp16 = tmp0 - tmp15 tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/6u/c6ujw2gsz4fiffrddbz6pylpsw3ourbw2ir2dtfko3n5r3gxebkj.py # Topologically Sorted Source Nodes: [b1, b2, b3], Original ATen: [aten.div, aten.linalg_cross] # Source node to ATen node mapping: # b1 => div # b2 => div_1 # b3 => index, index_1, index_2, index_3, mul_2, mul_3 # Graph fragment: # %div : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%select, %expand), kwargs = {}) # %div_1 : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %expand_1), kwargs = {}) # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%div, [None, %remainder]), kwargs = {}) # %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%div_1, [None, %remainder_1]), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, %index_1), kwargs = {}) # %index_2 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%div, [None, %remainder_2]), kwargs = {}) # %index_3 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%div_1, [None, %remainder_3]), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index_2, %index_3), kwargs = {}) triton_poi_fused_div_linalg_cross_2 = async_compile.triton('triton_poi_fused_div_linalg_cross_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_linalg_cross_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': 
False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_linalg_cross_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = (xindex // 3) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*((1 + x0) % 3)) + (6*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (6*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (6*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (4 + (6*x1)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + ((3*x1) + ((2 + x0) % 3)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (3*x1), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (1 + (3*x1)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (2 + (3*x1)), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr0 + ((2*((2 + x0) % 3)) + (6*x1)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + ((3*x1) + ((1 + x0) % 3)), xmask) tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp9 = libdevice.sqrt(tmp8) tmp10 = 1e-12 tmp11 = triton_helpers.maximum(tmp9, tmp10) tmp12 = tmp0 / tmp11 tmp15 = tmp14 * tmp14 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = libdevice.sqrt(tmp21) tmp23 = triton_helpers.maximum(tmp22, tmp10) tmp24 = tmp13 / tmp23 tmp25 = tmp12 * tmp24 tmp27 = tmp26 / tmp11 tmp29 = tmp28 / tmp23 tmp30 = tmp27 * tmp29 tl.store(out_ptr0 + (x2), tmp25, xmask) tl.store(out_ptr1 + (x2), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/6w/c6woiqqq2vb4nax2oe4lql3ogk6yfdkg35mdxnytclkct4emsqlb.py # Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack] # Source node to ATen node mapping: # stack => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1, %unsqueeze_2], -1), kwargs = {}) triton_poi_fused_stack_3 = async_compile.triton('triton_poi_fused_stack_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 36 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x3 = (xindex // 3) x2 = (xindex // 9) x5 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (2*x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (6*x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp6 * tmp6 tmp8 = tl.load(in_ptr0 + (2 + (6*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = tl.load(in_ptr0 + (4 + (6*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = libdevice.sqrt(tmp13) tmp15 = 1e-12 tmp16 = triton_helpers.maximum(tmp14, tmp15) tmp17 = tmp5 / tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp4, tmp17, tmp18) tmp20 = tmp0 >= tmp3 tmp21 = tl.full([1], 2, tl.int64) tmp22 = tmp0 < tmp21 tmp23 = tmp20 & tmp22 tmp24 = tl.load(in_ptr1 + (x3), tmp23 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = tl.load(in_ptr1 + (3*x2), tmp23 & xmask, eviction_policy='evict_last', other=0.0) tmp26 = tmp25 * tmp25 tmp27 = tl.load(in_ptr1 + (1 + (3*x2)), tmp23 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tmp27 * tmp27 tmp29 = tmp26 + tmp28 tmp30 = tl.load(in_ptr1 + (2 + (3*x2)), tmp23 & xmask, eviction_policy='evict_last', other=0.0) tmp31 = tmp30 * tmp30 tmp32 = tmp29 + tmp31 tmp33 = libdevice.sqrt(tmp32) tmp34 = triton_helpers.maximum(tmp33, tmp15) tmp35 = tmp24 / tmp34 tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype) tmp37 = tl.where(tmp23, tmp35, tmp36) tmp38 = tmp0 >= tmp21 tmp39 = tl.full([1], 3, tl.int64) tmp40 = tmp0 < tmp39 tmp41 = tl.load(in_ptr2 + (x3), tmp38 & xmask, eviction_policy='evict_last', other=0.0) tmp42 = tl.load(in_ptr3 + (x3), tmp38 & xmask, eviction_policy='evict_last', other=0.0) tmp43 = tmp41 - tmp42 tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype) tmp45 = tl.where(tmp38, tmp43, tmp44) tmp46 = tl.where(tmp23, tmp37, tmp45) tmp47 = tl.where(tmp4, tmp19, tmp46) tl.store(out_ptr0 + (x5), tmp47, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 3, 2), (6, 2, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) # Topologically Sorted Source Nodes: [b1, mul, dot_prod], Original ATen: [aten.div, aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_div_mul_sum_0.run(arg0_1, buf0, 4, grid=grid(4), stream=stream0) buf1 = empty_strided_cuda((4, 3), (3, 1), torch.float32) # Topologically Sorted Source Nodes: [b1, mul, dot_prod, mul_1, sub], Original ATen: [aten.div, aten.mul, aten.sum, aten.sub] triton_poi_fused_div_mul_sub_sum_1.run(arg0_1, buf0, buf1, 12, grid=grid(12), stream=stream0) del buf0 buf2 = empty_strided_cuda((4, 3), (3, 1), torch.float32) buf3 = empty_strided_cuda((4, 3), (3, 1), torch.float32) # Topologically Sorted Source Nodes: [b1, b2, b3], Original ATen: [aten.div, aten.linalg_cross] triton_poi_fused_div_linalg_cross_2.run(arg0_1, buf1, buf2, buf3, 12, grid=grid(12), stream=stream0) buf4 = 
empty_strided_cuda((4, 3, 3), (9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack] triton_poi_fused_stack_3.run(arg0_1, buf1, buf2, buf3, buf4, 36, grid=grid(36), stream=stream0) del arg0_1 del buf1 del buf2 del buf3 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 3, 2), (6, 2, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class ContinousRotReprDecoder(nn.Module): def __init__(self): super(ContinousRotReprDecoder, self).__init__() def forward(self, module_input): reshaped_input = module_input.view(-1, 3, 2) b1 = F.normalize(reshaped_input[:, :, 0], dim=1) dot_prod = torch.sum(b1 * reshaped_input[:, :, 1], dim=1, keepdim=True) b2 = F.normalize(reshaped_input[:, :, 1] - dot_prod * b1, dim=-1) b3 = torch.cross(b1, b2, dim=1) return torch.stack([b1, b2, b3], dim=-1) def get_inputs(): return [torch.rand([4, 3, 2])] def get_init_inputs(): return [[], {}]
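This module implements the continuous 6D rotation representation of Zhou et al., "On the Continuity of Rotation Representations in Neural Networks" (CVPR 2019): Gram-Schmidt on the two input columns yields two orthonormal vectors, and their cross product completes a right-handed frame. A quick property check, assuming the class above is in scope:

import torch

dec = ContinousRotReprDecoder()
R = dec(torch.rand(4, 3, 2))  # (4, 3, 3) rotation matrices
eye = torch.eye(3).expand(4, 3, 3)
torch.testing.assert_close(R.transpose(1, 2) @ R, eye, rtol=1e-4, atol=1e-5)
# determinant +1 distinguishes rotations from reflections
torch.testing.assert_close(torch.det(R), torch.ones(4), rtol=1e-4, atol=1e-5)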
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 6 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (2 + 6 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (4 + 6 * x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (1 + 6 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (3 + 6 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr0 + (5 + 6 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp8 = libdevice.sqrt(tmp7) tmp9 = 1e-12 tmp10 = triton_helpers.maximum(tmp8, tmp9) tmp11 = tmp0 / tmp10 tmp13 = tmp11 * tmp12 tmp14 = tmp2 / tmp10 tmp16 = tmp14 * tmp15 tmp17 = tmp13 + tmp16 tmp18 = tmp5 / tmp10 tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tl.store(out_ptr0 + x0, tmp21, xmask) @triton.jit def triton_poi_fused_div_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 3 tmp0 = tl.load(in_ptr0 + (1 + 2 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + 2 * x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + 6 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 6 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (4 + 6 * x1), xmask, eviction_policy='evict_last') tmp4 = tmp3 * tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tmp12 = 1e-12 tmp13 = triton_helpers.maximum(tmp11, tmp12) tmp14 = tmp2 / tmp13 tmp15 = tmp1 * tmp14 tmp16 = tmp0 - tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_div_linalg_cross_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = xindex // 3 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * ((1 + x0) % 3) + 6 * x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + 6 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 6 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (4 + 6 * x1), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (3 * x1 + (2 + x0) % 3), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + 3 * x1, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (1 + 3 * x1), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (2 + 3 * x1), xmask, eviction_policy='evict_last' ) tmp26 = tl.load(in_ptr0 + (2 * ((2 + x0) % 3) + 6 * x1), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (3 * x1 + (1 + x0) % 3), xmask) tmp2 
= tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp9 = libdevice.sqrt(tmp8) tmp10 = 1e-12 tmp11 = triton_helpers.maximum(tmp9, tmp10) tmp12 = tmp0 / tmp11 tmp15 = tmp14 * tmp14 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = libdevice.sqrt(tmp21) tmp23 = triton_helpers.maximum(tmp22, tmp10) tmp24 = tmp13 / tmp23 tmp25 = tmp12 * tmp24 tmp27 = tmp26 / tmp11 tmp29 = tmp28 / tmp23 tmp30 = tmp27 * tmp29 tl.store(out_ptr0 + x2, tmp25, xmask) tl.store(out_ptr1 + x2, tmp30, xmask) @triton.jit def triton_poi_fused_stack_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 36 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x3 = xindex // 3 x2 = xindex // 9 x5 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + 2 * x3, tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + 6 * x2, tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp7 = tmp6 * tmp6 tmp8 = tl.load(in_ptr0 + (2 + 6 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = tl.load(in_ptr0 + (4 + 6 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = libdevice.sqrt(tmp13) tmp15 = 1e-12 tmp16 = triton_helpers.maximum(tmp14, tmp15) tmp17 = tmp5 / tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp4, tmp17, tmp18) tmp20 = tmp0 >= tmp3 tmp21 = tl.full([1], 2, tl.int64) tmp22 = tmp0 < tmp21 tmp23 = tmp20 & tmp22 tmp24 = tl.load(in_ptr1 + x3, tmp23 & xmask, eviction_policy= 'evict_last', other=0.0) tmp25 = tl.load(in_ptr1 + 3 * x2, tmp23 & xmask, eviction_policy= 'evict_last', other=0.0) tmp26 = tmp25 * tmp25 tmp27 = tl.load(in_ptr1 + (1 + 3 * x2), tmp23 & xmask, eviction_policy= 'evict_last', other=0.0) tmp28 = tmp27 * tmp27 tmp29 = tmp26 + tmp28 tmp30 = tl.load(in_ptr1 + (2 + 3 * x2), tmp23 & xmask, eviction_policy= 'evict_last', other=0.0) tmp31 = tmp30 * tmp30 tmp32 = tmp29 + tmp31 tmp33 = libdevice.sqrt(tmp32) tmp34 = triton_helpers.maximum(tmp33, tmp15) tmp35 = tmp24 / tmp34 tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype) tmp37 = tl.where(tmp23, tmp35, tmp36) tmp38 = tmp0 >= tmp21 tl.full([1], 3, tl.int64) tmp41 = tl.load(in_ptr2 + x3, tmp38 & xmask, eviction_policy= 'evict_last', other=0.0) tmp42 = tl.load(in_ptr3 + x3, tmp38 & xmask, eviction_policy= 'evict_last', other=0.0) tmp43 = tmp41 - tmp42 tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype) tmp45 = tl.where(tmp38, tmp43, tmp44) tmp46 = tl.where(tmp23, tmp37, tmp45) tmp47 = tl.where(tmp4, tmp19, tmp46) tl.store(out_ptr0 + x5, tmp47, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 3, 2), (6, 2, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_div_mul_sum_0[grid(4)](arg0_1, buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 3), (3, 1), torch.float32) triton_poi_fused_div_mul_sub_sum_1[grid(12)](arg0_1, buf0, buf1, 12, XBLOCK=16, num_warps=1, num_stages=1) del buf0 buf2 = empty_strided_cuda((4, 3), (3, 1), torch.float32) buf3 = empty_strided_cuda((4, 3), (3, 1), torch.float32) triton_poi_fused_div_linalg_cross_2[grid(12)](arg0_1, buf1, buf2, buf3, 12, XBLOCK=16, num_warps=1, num_stages=1) buf4 = 
empty_strided_cuda((4, 3, 3), (9, 3, 1), torch.float32) triton_poi_fused_stack_3[grid(36)](arg0_1, buf1, buf2, buf3, buf4, 36, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del buf1 del buf2 del buf3 return buf4, class ContinousRotReprDecoderNew(nn.Module): def __init__(self): super(ContinousRotReprDecoderNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
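One detail worth flagging in the `call` wrapper above: `assert_size_stride(arg0_1, (4, 3, 2), (6, 2, 1))` guards on the exact strides the graph was traced with, not just the shape. A minimal sketch of that contract (assuming the `call` above is in scope and a CUDA device is available):

import torch

x = torch.rand(4, 3, 2, device='cuda')   # contiguous, so strides are (6, 2, 1)
rots, = call([x])                        # returns a 1-tuple holding the (4, 3, 3) output
xt = torch.rand(4, 2, 3, device='cuda').transpose(1, 2)
# xt also has shape (4, 3, 2) but strides (6, 1, 3); passing it to call()
# would trip assert_size_stride, so non-contiguous inputs need .contiguous() first.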
LuckyDC/human_body_prior
ContinousRotReprDecoder
false
5,565
[ "Xnet", "X11" ]
1
6a46613b4cbd9c62d888359f1435cec501643af3
https://github.com/LuckyDC/human_body_prior/tree/6a46613b4cbd9c62d888359f1435cec501643af3
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self): super().__init__() def forward(self, module_input): reshaped_input = module_input.view(-1, 3, 2) b1 = F.normalize(reshaped_input[:, :, 0], dim=1) dot_prod = torch.sum(b1 * reshaped_input[:, :, 1], dim=1, keepdim=True) b2 = F.normalize(reshaped_input[:, :, 1] - dot_prod * b1, dim=-1) b3 = torch.cross(b1, b2, dim=1) return torch.stack([b1, b2, b3], dim=-1) def get_inputs(): return [torch.rand([4, 3, 2])] def get_init_inputs(): return []
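The eager `Model` above and the compiled `ContinousRotReprDecoderNew` implement the same Gram-Schmidt construction: normalize b1, orthogonalize b2 against it, and take b3 = b1 × b2. That makes a parity-plus-orthonormality check a natural smoke test. A sketch, assuming both classes are in scope and CUDA is available:

import torch

x = torch.rand(4, 3, 2, device='cuda')      # same shape as get_inputs()
eager = Model()(x)                          # reference eager path
fused = ContinousRotReprDecoderNew()(x)     # Triton path via call()
assert torch.allclose(eager, fused, atol=1e-5)
# The output stacks b1, b2, b3 as columns, so each (3, 3) slice satisfies R^T R = I.
eye = torch.eye(3, device='cuda').expand(4, -1, -1)
assert torch.allclose(fused.transpose(1, 2) @ fused, eye, atol=1e-5)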
BCEWithLogitsLoss2d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/5u/c5u7c3xfjaazdtxwasop4c2d3vlwaagqtikdsnpoz56tcxjbvfvx.py # Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits], Original ATen: [aten.binary_cross_entropy_with_logits] # Source node to ATen node mapping: # binary_cross_entropy_with_logits => abs_1, exp, full_default, log1p, mean, minimum, mul, neg, sub, sub_1, sub_2 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %view_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %view), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %view), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%view,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {}) triton_per_fused_binary_cross_entropy_with_logits_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], 
reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp17, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits], Original ATen: [aten.binary_cross_entropy_with_logits] stream0 = get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn class BCEWithLogitsLoss2d(nn.Module): """Computationally stable version of 2D BCE loss """ def __init__(self, weight=None, reduction='elementwise_mean'): super(BCEWithLogitsLoss2d, self).__init__() if isinstance(weight, np.ndarray): weight = torch.from_numpy(weight) self.bce_loss = nn.BCEWithLogitsLoss(weight, reduction) def forward(self, logits, targets): logits_flat = logits.view(-1) targets_flat = targets.view(-1) return self.bce_loss(logits_flat, targets_flat) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class BCEWithLogitsLoss2dNew(nn.Module): """Computationally stable version of 2D BCE loss """ def __init__(self, weight=None, reduction='elementwise_mean'): super(BCEWithLogitsLoss2dNew, self).__init__() if isinstance(weight, np.ndarray): weight = torch.from_numpy(weight) self.bce_loss = nn.BCEWithLogitsLoss(weight, reduction) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
MIPT-Oulu/3D-Histo-Grading
BCEWithLogitsLoss2d
false
5,566
[ "MIT" ]
1
b779a154d0e5b104fc152c8952124768fb7b1dc6
https://github.com/MIPT-Oulu/3D-Histo-Grading/tree/b779a154d0e5b104fc152c8952124768fb7b1dc6
import torch import numpy as np import torch.nn as nn class Model(nn.Module): """Computationally stable version of 2D BCE loss """ def __init__(self, weight=None, reduction='elementwise_mean'): super().__init__() if isinstance(weight, np.ndarray): weight = torch.from_numpy(weight) self.bce_loss = nn.BCEWithLogitsLoss(weight, reduction) def forward(self, logits, targets): logits_flat = logits.view(-1) targets_flat = targets.view(-1) return self.bce_loss(logits_flat, targets_flat) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
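The persistent-reduction kernel in this entry is a line-for-line transcription of the numerically stable BCE-with-logits identity: per element, (1 - t)*x - (min(0, x) - log1p(exp(-|x|))) equals max(x, 0) - t*x + log(1 + exp(-|x|)), and the kernel then averages over all 256 elements (tmp16 = 256.0). An eager restatement that mirrors the tmp* registers (a sketch; the variable names are mine, not from the kernel):

import torch
import torch.nn.functional as F

x = torch.randn(256)   # logits  (in_ptr1 in the kernel)
t = torch.rand(256)    # targets (in_ptr0 in the kernel)
stable = (1.0 - t) * x - (torch.minimum(torch.zeros(()), x)
                          - torch.log1p(torch.exp(-x.abs())))
assert torch.allclose(stable.mean(),
                      F.binary_cross_entropy_with_logits(x, t), atol=1e-6)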
ViTStemPatchify
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/sr/csrhhqsexdcor6gq6tz4dawxblhadgekinzxxkt33uwojltligp6.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [4, 4], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 16, grid=grid(16), stream=stream0) del primals_2 return (buf1, primals_1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import torch import torch.nn as nn import torch.utils.data def patchify2d(w_in, w_out, k, *, bias=True): """Helper for building a patchify layer as used by ViT models.""" return nn.Conv2d(w_in, w_out, k, stride=k, padding=0, bias=bias) def patchify2d_cx(cx, w_in, w_out, k, *, bias=True): """Accumulates complexity of patchify2d into cx = (h, w, flops, params, acts).""" err_str = 'Only kernel sizes divisible by the input size are supported.' assert cx['h'] % k == 0 and cx['w'] % k == 0, err_str h, w, flops, params, acts = cx['h'], cx['w'], cx['flops'], cx['params' ], cx['acts'] h, w = h // k, w // k flops += k * k * w_in * w_out * h * w + (w_out * h * w if bias else 0) params += k * k * w_in * w_out + (w_out if bias else 0) acts += w_out * h * w return {'h': h, 'w': w, 'flops': flops, 'params': params, 'acts': acts} class ViTStemPatchify(Module): """The patchify vision transformer stem as per https://arxiv.org/abs/2010.11929.""" def __init__(self, w_in, w_out, k): super(ViTStemPatchify, self).__init__() self.patchify = patchify2d(w_in, w_out, k, bias=True) def forward(self, x): return self.patchify(x) @staticmethod def complexity(cx, w_in, w_out, k): return patchify2d_cx(cx, w_in, w_out, k, bias=True) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'w_in': 4, 'w_out': 4, 'k': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(16)](buf1, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return buf1, primals_1, primals_3 def patchify2d(w_in, w_out, k, *, bias=True): """Helper for building a patchify layer as used by ViT models.""" return nn.Conv2d(w_in, w_out, k, stride=k, padding=0, bias=bias) def patchify2d_cx(cx, w_in, w_out, k, *, bias=True): """Accumulates complexity of patchify2d into cx = (h, w, flops, params, acts).""" err_str = 'Only kernel sizes divisible by the input size are supported.' assert cx['h'] % k == 0 and cx['w'] % k == 0, err_str h, w, flops, params, acts = cx['h'], cx['w'], cx['flops'], cx['params' ], cx['acts'] h, w = h // k, w // k flops += k * k * w_in * w_out * h * w + (w_out * h * w if bias else 0) params += k * k * w_in * w_out + (w_out if bias else 0) acts += w_out * h * w return {'h': h, 'w': w, 'flops': flops, 'params': params, 'acts': acts} class ViTStemPatchifyNew(Module): """The patchify vision transformer stem as per https://arxiv.org/abs/2010.11929.""" def __init__(self, w_in, w_out, k): super(ViTStemPatchifyNew, self).__init__() self.patchify = patchify2d(w_in, w_out, k, bias=True) @staticmethod def complexity(cx, w_in, w_out, k): return patchify2d_cx(cx, w_in, w_out, k, bias=True) def forward(self, input_0): primals_1 = self.patchify.weight primals_2 = self.patchify.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
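Note that in this compiled graph the convolution itself stays an extern kernel; the only generated Triton code is the bias add over the (4, 4, 1, 1) conv output. The two steps in eager terms (a sketch; x, w, and b are stand-ins for the input, patchify weight, and bias):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)                # input, as in get_inputs()
w = torch.rand(4, 4, 4, 4)                # 4x4-kernel patchify weight
b = torch.rand(4)                         # patchify bias
y = F.conv2d(x, w, bias=None, stride=4)   # the extern_kernels.convolution step
y = y + b.view(1, -1, 1, 1)               # the add that triton_poi_fused_convolution_0 fuses
assert y.shape == (4, 4, 1, 1)            # one patch token per 4x4 image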
MAC-AutoML/XCompression
ViTStemPatchify
false
5,567
[ "MIT" ]
1
9f76eb3ccfb3057110ecf12aa48dec00a4667a25
https://github.com/MAC-AutoML/XCompression/tree/9f76eb3ccfb3057110ecf12aa48dec00a4667a25
from torch.nn import Module import torch import torch.nn as nn import torch.utils.data def patchify2d(w_in, w_out, k, *, bias=True): """Helper for building a patchify layer as used by ViT models.""" return nn.Conv2d(w_in, w_out, k, stride=k, padding=0, bias=bias) def patchify2d_cx(cx, w_in, w_out, k, *, bias=True): """Accumulates complexity of patchify2d into cx = (h, w, flops, params, acts).""" err_str = 'Only kernel sizes divisible by the input size are supported.' assert cx['h'] % k == 0 and cx['w'] % k == 0, err_str h, w, flops, params, acts = cx['h'], cx['w'], cx['flops'], cx['params' ], cx['acts'] h, w = h // k, w // k flops += k * k * w_in * w_out * h * w + (w_out * h * w if bias else 0) params += k * k * w_in * w_out + (w_out if bias else 0) acts += w_out * h * w return {'h': h, 'w': w, 'flops': flops, 'params': params, 'acts': acts} class Model(Module): """The patchify vision transformer stem as per https://arxiv.org/abs/2010.11929.""" def __init__(self, w_in, w_out, k): super().__init__() self.patchify = patchify2d(w_in, w_out, k, bias=True) def forward(self, x): return self.patchify(x) @staticmethod def complexity(cx, w_in, w_out, k): return patchify2d_cx(cx, w_in, w_out, k, bias=True) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
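The complexity helper is easy to verify by hand: with the `get_init_inputs()` config (w_in = w_out = k = 4) on a 4x4 input, the spatial dims collapse to 1x1. A worked call (a sketch; assumes `patchify2d_cx` from this entry is in scope):

cx = {'h': 4, 'w': 4, 'flops': 0, 'params': 0, 'acts': 0}
cx = patchify2d_cx(cx, 4, 4, 4)
# flops  = 4*4*4*4 * 1*1 + 4*1*1 = 260   (conv MACs plus bias adds)
# params = 4*4*4*4 + 4           = 260   (conv weight plus bias)
# acts   = 4 * 1 * 1             = 4     (output activations)
assert cx == {'h': 1, 'w': 1, 'flops': 260, 'params': 260, 'acts': 4}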
WDV52Linear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/oo/coo46bs5zpai6ydehcambdwmxekrecdbbhcbxivotaebjhzp7llm.py # Topologically Sorted Source Nodes: [weight_wd_1], Original ATen: [aten.clone, aten._unsafe_view] # Source node to ATen node mapping: # weight_wd_1 => clone, view # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) # %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%clone, [16, 4]), kwargs = {}) triton_poi_fused__unsafe_view_clone_0 = async_compile.triton('triton_poi_fused__unsafe_view_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_view_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = 
xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + ((4*(x1 % 4)) + (16*x0) + (x1 // 4)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/kk/ckkn7vlnz5gfvzigo3gxfhxwb6i7tj47p7plxoih22qwcvc7em26.py # Topologically Sorted Source Nodes: [weight_wd_5], Original ATen: [aten.clone, aten._unsafe_view] # Source node to ATen node mapping: # weight_wd_5 => clone_2, view_4 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format}) # %view_4 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%clone_2, [16, 4]), kwargs = {}) triton_poi_fused__unsafe_view_clone_1 = async_compile.triton('triton_poi_fused__unsafe_view_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_view_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_view_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + ((4*x1) + (16*(y0 // 4)) + (y0 % 4)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/4p/c4p5vybvk5kczshqzudonrympy7hqc7dyeur4eomof353wgjgkm6.py # Topologically Sorted Source Nodes: [tanh, mul, weight_wd_9], Original ATen: [aten.tanh, aten.mul, aten.add] # Source node to ATen node mapping: # mul => mul # tanh => tanh # weight_wd_9 => add # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%squeeze,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_6, %tanh), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_7), kwargs = {}) 
triton_poi_fused_add_mul_tanh_2 = async_compile.triton('triton_poi_fused_add_mul_tanh_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_tanh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_tanh_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask) tmp2 = libdevice.tanh(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/er/certponyoudrebqs7ovkmzybyrkb26c4kqh3oaysaxf4ryytitj2.py # Topologically Sorted Source Nodes: [tanh_1, mul_1, bias_wd_6], Original ATen: [aten.tanh, aten.mul, aten.add] # Source node to ATen node mapping: # bias_wd_6 => add_1 # mul_1 => mul_1 # tanh_1 => tanh_1 # Graph fragment: # %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%squeeze_1,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_12, %tanh_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_13), kwargs = {}) triton_poi_fused_add_mul_tanh_3 = async_compile.triton('triton_poi_fused_add_mul_tanh_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_tanh_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_tanh_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask) tmp2 = libdevice.tanh(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 1), (1, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4, 1), (1, 1)) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, ), (1, )) assert_size_stride(primals_14, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [weight_wd_1], Original ATen: [aten.clone, aten._unsafe_view] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_view_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [weight_wd_1], Original ATen: [aten.mm] extern_kernels.mm(buf0, primals_2, out=buf1) del primals_2 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [weight_wd_3], Original ATen: [aten.clone, aten._unsafe_view] triton_poi_fused__unsafe_view_clone_0.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [weight_wd_3], Original ATen: [aten.mm] extern_kernels.mm(buf2, primals_3, out=buf3) buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [weight_wd_5], Original ATen: [aten.clone, aten._unsafe_view] triton_poi_fused__unsafe_view_clone_1.run(buf3, buf4, 16, 4, grid=grid(16, 4), stream=stream0) buf5 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [weight_wd_5], Original ATen: [aten.mm] extern_kernels.mm(buf4, primals_4, out=buf5) buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [weight_wd_7], Original ATen: [aten.clone, aten._unsafe_view] 
triton_poi_fused__unsafe_view_clone_1.run(buf5, buf6, 16, 4, grid=grid(16, 4), stream=stream0) del buf5 buf7 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [weight_wd_7], Original ATen: [aten.mm] extern_kernels.mm(buf6, primals_5, out=buf7) buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [bias_wd_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), primals_9, out=buf8) del primals_9 buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [bias_wd_3], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf8, (4, 4), (1, 4), 0), primals_10, out=buf9) buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [bias_wd_4], Original ATen: [aten.mm] extern_kernels.mm(buf9, primals_11, out=buf10) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [tanh, mul, weight_wd_9], Original ATen: [aten.tanh, aten.mul, aten.add] triton_poi_fused_add_mul_tanh_2.run(primals_6, buf7, primals_7, buf11, 16, grid=grid(16), stream=stream0) del primals_7 buf12 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [tanh_1, mul_1, bias_wd_6], Original ATen: [aten.tanh, aten.mul, aten.add] triton_poi_fused_add_mul_tanh_3.run(primals_12, buf10, primals_13, buf12, 4, grid=grid(4), stream=stream0) del primals_13 buf13 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [tanh_1, mul_1, bias_wd_6, linear], Original ATen: [aten.tanh, aten.mul, aten.add, aten.addmm] extern_kernels.addmm(buf12, reinterpret_tensor(primals_14, (64, 4), (4, 1), 0), reinterpret_tensor(buf11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del buf11 del buf12 return (reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_6, primals_10, primals_12, buf7, buf10, reinterpret_tensor(primals_14, (64, 4), (4, 1), 0), reinterpret_tensor(buf9, (4, 4), (1, 4), 0), reinterpret_tensor(primals_11, (1, 4), (1, 1), 0), buf8, primals_8, reinterpret_tensor(buf6, (4, 16), (1, 4), 0), reinterpret_tensor(primals_5, (1, 4), (1, 1), 0), reinterpret_tensor(buf4, (4, 16), (1, 4), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), reinterpret_tensor(buf2, (4, 16), (1, 4), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), reinterpret_tensor(buf0, (4, 16), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), 
device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn.functional as F import torch.utils.data from torch.nn import Parameter import torch.onnx.operators from torch.nn.parameter import Parameter from torch.nn import init import torch.optim import torch.optim.lr_scheduler class WDV52Linear(torch.nn.Module): """Applies a linear transformation to the incoming data: :math:`y = xA^T + b` Args: in_features: size of each input sample out_features: size of each output sample bias: If set to ``False``, the layer will not learn an additive bias. Default: ``True`` Shape: - Input: :math:`(N, *, H_{in})` where :math:`*` means any number of additional dimensions and :math:`H_{in} = \\text{in\\_features}` - Output: :math:`(N, *, H_{out})` where all but the last dimension are the same shape as the input and :math:`H_{out} = \\text{out\\_features}`. Attributes: weight: the learnable weights of the module of shape :math:`(\\text{out\\_features}, \\text{in\\_features})`. The values are initialized from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})`, where :math:`k = \\frac{1}{\\text{in\\_features}}` bias: the learnable bias of the module of shape :math:`(\\text{out\\_features})`. If :attr:`bias` is ``True``, the values are initialized from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where :math:`k = \\frac{1}{\\text{in\\_features}}` Examples:: >>> m = nn.Linear(20, 30) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) torch.Size([128, 30]) """ __constants__ = ['bias', 'in_features', 'out_features'] def __init__(self, ts, in_features, out_features, wd_in_features, wd_out_features, wd_decoder_layers, wd_require_gradient=False, bias =True): super(WDV52Linear, self).__init__() self.in_features = in_features self.out_features = out_features self.ts = ts self.wd_decoder_layers = wd_decoder_layers self.out_wd_weight = Parameter(torch.Tensor(wd_out_features, out_features)) self.in_wd_weight = Parameter(torch.Tensor(wd_in_features, in_features) ) self.ly_wd_weight = Parameter(torch.Tensor(ts, ts)) self.wd_weight = Parameter(torch.Tensor(ts, 1)) self.weight = Parameter(torch.Tensor(wd_out_features, wd_in_features, ts), requires_grad=wd_require_gradient) self.tanh_weight_weight = Parameter(torch.Tensor(out_features, in_features)) self.tanh_bias_weight = Parameter(torch.Tensor(out_features, in_features)) if bias: self.out_wd_bias = Parameter(torch.Tensor(wd_out_features, out_features)) self.ly_wd_bias = Parameter(torch.Tensor(ts, ts)) self.wd_bias = Parameter(torch.Tensor(ts, 1)) self.bias = Parameter(torch.Tensor(wd_out_features, ts), requires_grad=wd_require_gradient) self.tanh_weight_bias = Parameter(torch.Tensor(out_features)) self.tanh_bias_bias = Parameter(torch.Tensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): torch.nn.init.xavier_normal_(self.out_wd_weight) torch.nn.init.xavier_normal_(self.in_wd_weight) torch.nn.init.xavier_normal_(self.ly_wd_weight) torch.nn.init.constant_(self.wd_weight, 1 / self.ts) torch.nn.init.constant_(self.tanh_weight_weight, 1.0) torch.nn.init.constant_(self.tanh_weight_bias, 1.0) init.kaiming_uniform_(self.weight, a=math.sqrt(5)) if self.bias is not None: torch.nn.init.xavier_normal_(self.out_wd_bias) torch.nn.init.xavier_normal_(self.ly_wd_bias) torch.nn.init.constant_(self.wd_bias, 1 / self.ts) torch.nn.init.constant_(self.tanh_bias_weight, 0.0) torch.nn.init.constant_(self.tanh_bias_bias, 0.0) fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight) bound = 1 / math.sqrt(fan_in) 
init.uniform_(self.bias, -bound, bound) def forward(self, input): weight = self.weight weight_wd = torch.transpose(weight, 0, 2) weight_wd = torch.matmul(weight_wd, self.out_wd_weight) weight_wd = torch.transpose(weight_wd, 0, 2) weight_wd = torch.matmul(weight_wd, self.ly_wd_weight) weight_wd = torch.transpose(weight_wd, 1, 2) weight_wd = torch.matmul(weight_wd, self.in_wd_weight) weight_wd = torch.transpose(weight_wd, 1, 2) weight_wd = torch.matmul(weight_wd, self.wd_weight) weight_wd = weight_wd.squeeze(-1) weight_wd = self.tanh_weight_weight * torch.tanh(weight_wd ) + self.tanh_bias_weight bias = self.bias bias_wd = torch.transpose(bias, 0, 1) bias_wd = torch.matmul(bias_wd, self.out_wd_bias) bias_wd = torch.transpose(bias_wd, 0, 1) bias_wd = torch.matmul(bias_wd, self.ly_wd_bias) bias_wd = torch.matmul(bias_wd, self.wd_bias) bias_wd = bias_wd.squeeze(-1) bias_wd = self.tanh_weight_bias * torch.tanh(bias_wd ) + self.tanh_bias_bias return F.linear(input, weight_wd, bias_wd) def extra_repr(self): return 'in_features={}, out_features={}, bias={}'.format(self. in_features, self.out_features, self.bias is not None) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'ts': 4, 'in_features': 4, 'out_features': 4, 'wd_in_features': 4, 'wd_out_features': 4, 'wd_decoder_layers': 1}]
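The forward above builds the effective (out, in) weight by threading the stored (wd_out, wd_in, ts) tensor through four matmuls, re-transposing between them so each contraction lands on the last axis; this is exactly why the compiled code in this entry needs the clone/_unsafe_view kernels before each `mm`. A shape trace (annotation only; the axis names are mine):

# weight                          : (wd_out, wd_in, ts)
# transpose(0, 2)                 : (ts, wd_in, wd_out)
# @ out_wd_weight (wd_out, out)   : (ts, wd_in, out)
# transpose(0, 2)                 : (out, wd_in, ts)
# @ ly_wd_weight  (ts, ts)        : (out, wd_in, ts)
# transpose(1, 2)                 : (out, ts, wd_in)
# @ in_wd_weight  (wd_in, in)     : (out, ts, in)
# transpose(1, 2)                 : (out, in, ts)
# @ wd_weight     (ts, 1)         : (out, in, 1) -> squeeze(-1) -> (out, in)
# gate: tanh_weight_weight * tanh(weight_wd) + tanh_bias_weight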
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch.utils.data from torch.nn import Parameter import torch.onnx.operators from torch.nn.parameter import Parameter from torch.nn import init import torch.optim import torch.optim.lr_scheduler assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 * (x1 % 4) + 16 * x0 + x1 // 4), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused__unsafe_view_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (4 * x1 + 16 * (y0 // 4) + y0 % 4), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_mul_tanh_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp2 = libdevice.tanh(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_add_mul_tanh_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp2 = libdevice.tanh(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 1), (1, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4, 1), (1, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) 
get_raw_stream(0) triton_poi_fused__unsafe_view_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, primals_2, out=buf1) del primals_2 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused__unsafe_view_clone_0[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 extern_kernels.mm(buf2, primals_3, out=buf3) buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused__unsafe_view_clone_1[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf5 = buf3 del buf3 extern_kernels.mm(buf4, primals_4, out=buf5) buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused__unsafe_view_clone_1[grid(16, 4)](buf5, buf6, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf5 buf7 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf6, primals_5, out=buf7) buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), primals_9, out=buf8) del primals_9 buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf8, (4, 4), (1, 4), 0), primals_10, out=buf9) buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf9, primals_11, out=buf10) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_tanh_2[grid(16)](primals_6, buf7, primals_7, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf12 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_add_mul_tanh_3[grid(4)](primals_12, buf10, primals_13, buf12, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_13 buf13 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(buf12, reinterpret_tensor(primals_14, (64, 4), (4, 1), 0), reinterpret_tensor(buf11, (4, 4), (1, 4), 0), alpha =1, beta=1, out=buf13) del buf11 del buf12 return reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_6, primals_10, primals_12, buf7, buf10, reinterpret_tensor( primals_14, (64, 4), (4, 1), 0), reinterpret_tensor(buf9, (4, 4), ( 1, 4), 0), reinterpret_tensor(primals_11, (1, 4), (1, 1), 0 ), buf8, primals_8, reinterpret_tensor(buf6, (4, 16), (1, 4), 0 ), reinterpret_tensor(primals_5, (1, 4), (1, 1), 0 ), reinterpret_tensor(buf4, (4, 16), (1, 4), 0), reinterpret_tensor( primals_4, (4, 4), (1, 4), 0), reinterpret_tensor(buf2, (4, 16), (1, 4), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0 ), reinterpret_tensor(buf0, (4, 16), (1, 4), 0) class WDV52LinearNew(torch.nn.Module): """Applies a linear transformation to the incoming data: :math:`y = xA^T + b` Args: in_features: size of each input sample out_features: size of each output sample bias: If set to ``False``, the layer will not learn an additive bias. Default: ``True`` Shape: - Input: :math:`(N, *, H_{in})` where :math:`*` means any number of additional dimensions and :math:`H_{in} = \\text{in\\_features}` - Output: :math:`(N, *, H_{out})` where all but the last dimension are the same shape as the input and :math:`H_{out} = \\text{out\\_features}`. Attributes: weight: the learnable weights of the module of shape :math:`(\\text{out\\_features}, \\text{in\\_features})`. 
The values are initialized from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})`, where :math:`k = \\frac{1}{\\text{in\\_features}}` bias: the learnable bias of the module of shape :math:`(\\text{out\\_features})`. If :attr:`bias` is ``True``, the values are initialized from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where :math:`k = \\frac{1}{\\text{in\\_features}}` Examples:: >>> m = nn.Linear(20, 30) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) torch.Size([128, 30]) """ __constants__ = ['bias', 'in_features', 'out_features'] def __init__(self, ts, in_features, out_features, wd_in_features, wd_out_features, wd_decoder_layers, wd_require_gradient=False, bias =True): super(WDV52LinearNew, self).__init__() self.in_features = in_features self.out_features = out_features self.ts = ts self.wd_decoder_layers = wd_decoder_layers self.out_wd_weight = Parameter(torch.Tensor(wd_out_features, out_features)) self.in_wd_weight = Parameter(torch.Tensor(wd_in_features, in_features) ) self.ly_wd_weight = Parameter(torch.Tensor(ts, ts)) self.wd_weight = Parameter(torch.Tensor(ts, 1)) self.weight = Parameter(torch.Tensor(wd_out_features, wd_in_features, ts), requires_grad=wd_require_gradient) self.tanh_weight_weight = Parameter(torch.Tensor(out_features, in_features)) self.tanh_bias_weight = Parameter(torch.Tensor(out_features, in_features)) if bias: self.out_wd_bias = Parameter(torch.Tensor(wd_out_features, out_features)) self.ly_wd_bias = Parameter(torch.Tensor(ts, ts)) self.wd_bias = Parameter(torch.Tensor(ts, 1)) self.bias = Parameter(torch.Tensor(wd_out_features, ts), requires_grad=wd_require_gradient) self.tanh_weight_bias = Parameter(torch.Tensor(out_features)) self.tanh_bias_bias = Parameter(torch.Tensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): torch.nn.init.xavier_normal_(self.out_wd_weight) torch.nn.init.xavier_normal_(self.in_wd_weight) torch.nn.init.xavier_normal_(self.ly_wd_weight) torch.nn.init.constant_(self.wd_weight, 1 / self.ts) torch.nn.init.constant_(self.tanh_weight_weight, 1.0) torch.nn.init.constant_(self.tanh_weight_bias, 1.0) init.kaiming_uniform_(self.weight, a=math.sqrt(5)) if self.bias is not None: torch.nn.init.xavier_normal_(self.out_wd_bias) torch.nn.init.xavier_normal_(self.ly_wd_bias) torch.nn.init.constant_(self.wd_bias, 1 / self.ts) torch.nn.init.constant_(self.tanh_bias_weight, 0.0) torch.nn.init.constant_(self.tanh_bias_bias, 0.0) fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight) bound = 1 / math.sqrt(fan_in) init.uniform_(self.bias, -bound, bound) def extra_repr(self): return 'in_features={}, out_features={}, bias={}'.format(self. in_features, self.out_features, self.bias is not None) def forward(self, input_0): primals_2 = self.out_wd_weight primals_3 = self.in_wd_weight primals_4 = self.ly_wd_weight primals_5 = self.wd_weight primals_1 = self.weight primals_6 = self.tanh_weight_weight primals_7 = self.tanh_bias_weight primals_8 = self.out_wd_bias primals_9 = self.ly_wd_bias primals_11 = self.wd_bias primals_10 = self.bias primals_12 = self.tanh_weight_bias primals_13 = self.tanh_bias_bias primals_14 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14]) return output[0]
Lollipop321/weight-distillation
WDV52Linear
false
5,568
[ "BSD-3-Clause" ]
1
cfc76ec58e3e88094dde1825287b2968f9718431
https://github.com/Lollipop321/weight-distillation/tree/cfc76ec58e3e88094dde1825287b2968f9718431
import math import torch import torch.nn.functional as F import torch.utils.data from torch.nn import Parameter import torch.onnx.operators from torch.nn.parameter import Parameter from torch.nn import init import torch.optim import torch.optim.lr_scheduler class Model(torch.nn.Module): """Applies a linear transformation to the incoming data: :math:`y = xA^T + b` Args: in_features: size of each input sample out_features: size of each output sample bias: If set to ``False``, the layer will not learn an additive bias. Default: ``True`` Shape: - Input: :math:`(N, *, H_{in})` where :math:`*` means any number of additional dimensions and :math:`H_{in} = \\text{in\\_features}` - Output: :math:`(N, *, H_{out})` where all but the last dimension are the same shape as the input and :math:`H_{out} = \\text{out\\_features}`. Attributes: weight: the learnable weights of the module of shape :math:`(\\text{out\\_features}, \\text{in\\_features})`. The values are initialized from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})`, where :math:`k = \\frac{1}{\\text{in\\_features}}` bias: the learnable bias of the module of shape :math:`(\\text{out\\_features})`. If :attr:`bias` is ``True``, the values are initialized from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where :math:`k = \\frac{1}{\\text{in\\_features}}` Examples:: >>> m = nn.Linear(20, 30) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) torch.Size([128, 30]) """ __constants__ = ['bias', 'in_features', 'out_features'] def __init__(self, ts, in_features, out_features, wd_in_features, wd_out_features, wd_decoder_layers, wd_require_gradient=False, bias =True): super().__init__() self.in_features = in_features self.out_features = out_features self.ts = ts self.wd_decoder_layers = wd_decoder_layers self.out_wd_weight = Parameter(torch.Tensor(wd_out_features, out_features)) self.in_wd_weight = Parameter(torch.Tensor(wd_in_features, in_features) ) self.ly_wd_weight = Parameter(torch.Tensor(ts, ts)) self.wd_weight = Parameter(torch.Tensor(ts, 1)) self.weight = Parameter(torch.Tensor(wd_out_features, wd_in_features, ts), requires_grad=wd_require_gradient) self.tanh_weight_weight = Parameter(torch.Tensor(out_features, in_features)) self.tanh_bias_weight = Parameter(torch.Tensor(out_features, in_features)) if bias: self.out_wd_bias = Parameter(torch.Tensor(wd_out_features, out_features)) self.ly_wd_bias = Parameter(torch.Tensor(ts, ts)) self.wd_bias = Parameter(torch.Tensor(ts, 1)) self.bias = Parameter(torch.Tensor(wd_out_features, ts), requires_grad=wd_require_gradient) self.tanh_weight_bias = Parameter(torch.Tensor(out_features)) self.tanh_bias_bias = Parameter(torch.Tensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): torch.nn.init.xavier_normal_(self.out_wd_weight) torch.nn.init.xavier_normal_(self.in_wd_weight) torch.nn.init.xavier_normal_(self.ly_wd_weight) torch.nn.init.constant_(self.wd_weight, 1 / self.ts) torch.nn.init.constant_(self.tanh_weight_weight, 1.0) torch.nn.init.constant_(self.tanh_weight_bias, 1.0) init.kaiming_uniform_(self.weight, a=math.sqrt(5)) if self.bias is not None: torch.nn.init.xavier_normal_(self.out_wd_bias) torch.nn.init.xavier_normal_(self.ly_wd_bias) torch.nn.init.constant_(self.wd_bias, 1 / self.ts) # ... truncated (>4000 chars) for memory efficiency
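Because the compiled `WDV52LinearNew` keeps the same parameter names as the eager module, the two can be cross-checked by sharing one state_dict. A sketch, assuming CUDA and that both classes from this entry are in scope:

import torch

init = dict(ts=4, in_features=4, out_features=4, wd_in_features=4,
            wd_out_features=4, wd_decoder_layers=1)
eager = WDV52Linear(**init).cuda()
fused = WDV52LinearNew(**init).cuda()
fused.load_state_dict(eager.state_dict())   # parameter names match one-to-one
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(eager(x), fused(x), atol=1e-5)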
DsBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/rl/crlzvualg2za225vkja5fjpnxss76chgi2glfm2hpmsgp2hm7zo3.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_1 => relu # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%squeeze_5, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex 
x1 = (xindex // 64) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 256, grid=grid(256), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.max_pool3d_with_indices] buf2 = torch.ops.aten.max_pool3d_with_indices.default(buf1, [2, 2, 2], [2, 2, 2]) buf3 = buf2[0] buf4 = buf2[1] del buf2 return (buf3, buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf1, buf4, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


def conv3x3(in_channels, out_channels, stride=1, padding=1, bias=True):
    return nn.Conv3d(in_channels, out_channels, kernel_size=3, stride=stride,
        padding=padding, bias=bias)


class DsBlock(nn.Module):

    def __init__(self, in_channels, out_channels, pooling):
        super(DsBlock, self).__init__()
        self.conv = conv3x3(in_channels, out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.pooling = pooling
        if pooling:
            self.mp = nn.MaxPool3d(kernel_size=2, stride=2, padding=0)

    def forward(self, x):
        out = self.conv(x)
        out = self.relu(out)
        before_pool = out
        if self.pooling:
            out = self.mp(out)
        return out, before_pool


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'pooling': 4}]
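A minimal eager-mode usage sketch of the block above (my addition, not part of the dataset record), using a batched (N, C, D, H, W) input; the dataset's own get_inputs() uses an unbatched 4-D tensor that recent PyTorch versions also accept.

# Hedged usage sketch: conv3x3 -> ReLU -> optional 2x2x2 max pooling,
# returning both the pooled output and the pre-pool activation.
block = DsBlock(in_channels=4, out_channels=4, pooling=True)
x = torch.rand(2, 4, 4, 4, 4)
out, before_pool = block(x)
print(out.shape)          # torch.Size([2, 4, 2, 2, 2]) after max pooling
print(before_pool.shape)  # torch.Size([2, 4, 4, 4, 4]) pre-pool activation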
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 64
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
            4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1,
            1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
            output_padding=(0, 0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
            primals_2, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = torch.ops.aten.max_pool3d_with_indices.default(buf1, [2, 2,
            2], [2, 2, 2])
        buf3 = buf2[0]
        buf4 = buf2[1]
        del buf2
    return buf3, buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4,
        4, 4), (256, 64, 16, 4, 1), 0), buf1, buf4, buf5


def conv3x3(in_channels, out_channels, stride=1, padding=1, bias=True):
    return nn.Conv3d(in_channels, out_channels, kernel_size=3, stride=stride,
        padding=padding, bias=bias)


class DsBlockNew(nn.Module):

    def __init__(self, in_channels, out_channels, pooling):
        super(DsBlockNew, self).__init__()
        self.conv = conv3x3(in_channels, out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.pooling = pooling
        if pooling:
            self.mp = nn.MaxPool3d(kernel_size=2, stride=2, padding=0)

    def forward(self, input_0):
        primals_1 = self.conv.weight
        primals_2 = self.conv.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0], output[1]
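A hedged parity sketch (my addition, not part of the dataset record): on a CUDA machine with a torch/triton build compatible with the generated kernel, the compiled DsBlockNew should match the eager DsBlock once the weights are shared. The call() wrapper fixes the input at the contiguous (4, 4, 4, 4) shape asserted above.

# Hedged parity sketch; assumes CUDA and a compatible torch/triton build.
eager = DsBlock(4, 4, pooling=True).cuda()
compiled = DsBlockNew(4, 4, pooling=True).cuda()
compiled.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
out_e, pre_e = eager(x)
out_c, pre_c = compiled(x)
assert torch.allclose(out_e, out_c, atol=1e-5)
assert torch.allclose(pre_e, pre_c, atol=1e-5)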
MATHplus-Young-Academy/P2-Cardiac-Motion
DsBlock
false
5569
[ "Apache-2.0" ]
1
844995e8e5760f981c425d13c0bd7f2f3bb8baec
https://github.com/MATHplus-Young-Academy/P2-Cardiac-Motion/tree/844995e8e5760f981c425d13c0bd7f2f3bb8baec
import torch
import torch.nn as nn


def conv3x3(in_channels, out_channels, stride=1, padding=1, bias=True):
    return nn.Conv3d(in_channels, out_channels, kernel_size=3, stride=stride,
        padding=padding, bias=bias)


class Model(nn.Module):

    def __init__(self, in_channels, out_channels, pooling):
        super().__init__()
        self.conv = conv3x3(in_channels, out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.pooling = pooling
        if pooling:
            self.mp = nn.MaxPool3d(kernel_size=2, stride=2, padding=0)

    def forward(self, x):
        out = self.conv(x)
        out = self.relu(out)
        before_pool = out
        if self.pooling:
            out = self.mp(out)
        return out, before_pool


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4, 4]
BinaryDiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/gd/cgdtd7ki7lurypoeyfwjebdfquygdeupjef4ltfbbbdk5u7owcpl.py # Topologically Sorted Source Nodes: [intersection, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # intersection => mul # sum_1 => sum_1 # sum_2 => sum_2 # sum_3 => sum_3 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view, [1]), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_1, [1]), kwargs = {}) triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp14 = tl.where(xmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tl.store(out_ptr0 + (x0), tmp7, xmask) tl.store(out_ptr1 + (x0), tmp11, xmask) tl.store(out_ptr2 + (x0), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/e4/ce4meaau7uw5ohjmea5wahpx7ugnh7nquzqbbvbbeqmpgntgbyen.py # Topologically Sorted Source Nodes: [add, mul_1, add_1, add_2, score, sum_4, truediv_1, score_1], Original ATen: [aten.add, aten.mul, aten.div, aten.sum, aten.rsub] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # mul_1 => mul_1 # score => div # score_1 => sub # sum_4 => sum_4 # truediv_1 => div_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-15), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 2.0), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1e-15), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_2), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%div,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_4, 4), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_1), kwargs = {}) triton_per_fused_add_div_mul_rsub_sum_1 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 
'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp5 = tl.load(in_ptr1 + (r0), None) tmp6 = tl.load(in_ptr2 + (r0), None) tmp1 = 1e-15 tmp2 = tmp0 + tmp1 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp7 + tmp1 tmp9 = tmp4 / tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 1)[:, None] tmp13 = 0.25 tmp14 = tmp12 * tmp13 tmp15 = 1.0 tmp16 = tmp15 - tmp14 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp16, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.float32) buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) buf2 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [intersection, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_mul_sum_0.run(arg1_1, arg0_1, buf0, buf1, buf2, 4, 64, grid=grid(4), stream=stream0) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [add, mul_1, add_1, add_2, score, sum_4, truediv_1, score_1], Original ATen: [aten.add, aten.mul, aten.div, aten.sum, aten.rsub] triton_per_fused_add_div_mul_rsub_sum_1.run(buf4, buf0, buf1, buf2, 1, 4, grid=grid(1), stream=stream0) del buf0 del buf1 del buf2 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class BinaryDiceLoss(nn.Module):
    """SoftDice loss
    """

    def __init__(self):
        super(BinaryDiceLoss, self).__init__()
        self.SM = nn.Sigmoid()

    def forward(self, logits, labels):
        num = labels.size(0)
        m1 = self.SM(logits).view(num, -1)
        m2 = labels.view(num, -1)
        intersection = m1 * m2
        score = 2.0 * (intersection.sum(1) + 1e-15) / (m1.sum(1) +
            m2.sum(1) + 1e-15)
        score = 1 - score.sum() / num
        return score


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
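As a quick illustration of the formula above (my addition, not part of the dataset record): the per-sample soft Dice score is 2(Σ m1·m2 + ε) / (Σ m1 + Σ m2 + ε), averaged over the batch and subtracted from 1, so stronger overlap between prediction and label gives a lower loss.

# Hedged usage sketch: sigmoid is applied to the logits inside the loss,
# so pass raw scores, not probabilities.
criterion = BinaryDiceLoss()
logits = torch.randn(4, 1, 8, 8)
labels = torch.randint(0, 2, (4, 1, 8, 8)).float()
loss = criterion(logits, labels)
print(loss.item())  # scalar; approaches 0 as predictions overlap the labels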
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
        out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp2 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
    tmp1 = tl.sigmoid(tmp0)
    tmp3 = tmp1 * tmp2
    tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp10 = tl.where(xmask, tmp8, 0)
    tmp11 = tl.sum(tmp10, 1)[:, None]
    tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp14 = tl.where(xmask, tmp12, 0)
    tmp15 = tl.sum(tmp14, 1)[:, None]
    tl.store(out_ptr0 + x0, tmp7, xmask)
    tl.store(out_ptr1 + x0, tmp11, xmask)
    tl.store(out_ptr2 + x0, tmp15, xmask)


@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1,
        in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp5 = tl.load(in_ptr1 + r0, None)
    tmp6 = tl.load(in_ptr2 + r0, None)
    tmp1 = 1e-15
    tmp2 = tmp0 + tmp1
    tmp3 = 2.0
    tmp4 = tmp2 * tmp3
    tmp7 = tmp5 + tmp6
    tmp8 = tmp7 + tmp1
    tmp9 = tmp4 / tmp8
    tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
    tmp12 = tl.sum(tmp10, 1)[:, None]
    tmp13 = 0.25
    tmp14 = tmp12 * tmp13
    tmp15 = 1.0
    tmp16 = tmp15 - tmp14
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4,), (1,), torch.float32)
        buf1 = empty_strided_cuda((4,), (1,), torch.float32)
        buf2 = empty_strided_cuda((4,), (1,), torch.float32)
        get_raw_stream(0)
        triton_per_fused_mul_sum_0[grid(4)](arg1_1, arg0_1, buf0, buf1,
            buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        buf3 = empty_strided_cuda((), (), torch.float32)
        buf4 = buf3
        del buf3
        triton_per_fused_add_div_mul_rsub_sum_1[grid(1)](buf4, buf0, buf1,
            buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del buf0
        del buf1
        del buf2
    return buf4,


class BinaryDiceLossNew(nn.Module):
    """SoftDice loss
    """

    def __init__(self):
        super(BinaryDiceLossNew, self).__init__()
        self.SM = nn.Sigmoid()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
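A hedged parity sketch (my addition, not part of the dataset record). Note an argument-order quirk visible in the generated code: the fused kernel applies the sigmoid to arg1_1, i.e. to the second element of call()'s argument list, because the trace lifted the labels tensor first (it is touched first via labels.size(0)). So to reproduce the eager result, the logits go second here.

# Hedged parity sketch; assumes CUDA and a compatible torch/triton build.
logits = torch.rand(4, 4, 4, 4, device='cuda')
labels = torch.rand(4, 4, 4, 4, device='cuda')
ref = BinaryDiceLoss()(logits, labels)
out = BinaryDiceLossNew()(labels, logits)  # sigmoid hits the 2nd argument
assert torch.allclose(ref, out, atol=1e-5)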
MIPT-Oulu/3D-Histo-Grading
BinaryDiceLoss
false
5570
[ "MIT" ]
1
b779a154d0e5b104fc152c8952124768fb7b1dc6
https://github.com/MIPT-Oulu/3D-Histo-Grading/tree/b779a154d0e5b104fc152c8952124768fb7b1dc6
import torch
import torch.nn as nn


class Model(nn.Module):
    """SoftDice loss
    """

    def __init__(self):
        super().__init__()
        self.SM = nn.Sigmoid()

    def forward(self, logits, labels):
        num = labels.size(0)
        m1 = self.SM(logits).view(num, -1)
        m2 = labels.view(num, -1)
        intersection = m1 * m2
        score = 2.0 * (intersection.sum(1) + 1e-15) / (m1.sum(1) +
            m2.sum(1) + 1e-15)
        score = 1 - score.sum() / num
        return score


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
MultiHeadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/an/cancvgq5crrl53xjlus3mms3in3pgeh5vjfezvvfgpcqipjh7byq.py # Topologically Sorted Source Nodes: [q_2, x], Original ATen: [aten.mul, aten.clone] # Source node to ATen node mapping: # q_2 => mul # x => clone # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {}) # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_mul_0 = async_compile.triton('triton_poi_fused_clone_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_mul_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + 
tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/mh/cmhet4vfl4jlxtge4zzaaa2nugvxpr5f4ge7rs72qwe2aow7vxwy.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone] # Source node to ATen node mapping: # x => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/tt/cttmvktt3m2x2nl56afa7l3abaxt7wlehowakdzngkhgs35f3n7u.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # x_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_11, [3], True), kwargs = {}) # %sub : [num_users=1] = 
call_function[target=torch.ops.aten.sub.Tensor](args = (%view_11, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ry/cryn7ntc2gpkbfzbre3xh7lffx7zkbskw6oihbzsekkgajmdbki6.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # x_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [3], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': 
DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_4 => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, 
ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [q_2, x], Original ATen: [aten.mul, aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_mul_0.run(buf0, primals_3, buf3, 16, 4, grid=grid(16, 4), stream=stream0) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf6, buf7, 256, grid=grid(256), stream=stream0) del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.clone] 
triton_poi_fused_clone_1.run(buf2, primals_8, buf8, 16, 4, grid=grid(16, 4), stream=stream0) del primals_8 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf9, buf10, 16, 4, grid=grid(16, 4), stream=stream0) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11) del primals_11 return (reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), primals_10, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class MultiHeadAttention(nn.Module):

    def __init__(self, hidden_size, attention_dropout_rate, num_heads):
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.att_size = att_size = hidden_size // num_heads
        self.scale = att_size ** -0.5
        self.linear_q = nn.Linear(hidden_size, num_heads * att_size)
        self.linear_k = nn.Linear(hidden_size, num_heads * att_size)
        self.linear_v = nn.Linear(hidden_size, num_heads * att_size)
        self.att_dropout = nn.Dropout(attention_dropout_rate)
        self.output_layer = nn.Linear(num_heads * att_size, hidden_size)

    def forward(self, q, k, v, attn_bias=None):
        orig_q_size = q.size()
        d_k = self.att_size
        d_v = self.att_size
        batch_size = q.size(0)
        q = self.linear_q(q).view(batch_size, -1, self.num_heads, d_k)
        k = self.linear_k(k).view(batch_size, -1, self.num_heads, d_k)
        v = self.linear_v(v).view(batch_size, -1, self.num_heads, d_v)
        q = q.transpose(1, 2)
        v = v.transpose(1, 2)
        k = k.transpose(1, 2).transpose(2, 3)
        q = q * self.scale
        x = torch.matmul(q, k)
        if attn_bias is not None:
            x = x + attn_bias
        x = torch.softmax(x, dim=3)
        x = self.att_dropout(x)
        x = x.matmul(v)
        x = x.transpose(1, 2).contiguous()
        x = x.view(batch_size, -1, self.num_heads * d_v)
        x = self.output_layer(x)
        assert x.size() == orig_q_size
        return x


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden_size': 4, 'attention_dropout_rate': 0.5,
        'num_heads': 4}]
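A minimal self-attention usage sketch for the module above (my addition, not part of the dataset record), using the shapes from get_inputs(); with hidden_size=4 split across num_heads=4, each head has dimension att_size=1.

# Hedged usage sketch: self-attention, q = k = v.
mha = MultiHeadAttention(hidden_size=4, attention_dropout_rate=0.5,
    num_heads=4)
mha.eval()  # disable dropout so repeated calls are deterministic
x = torch.rand(4, 4, 4)  # (batch, seq_len, hidden)
y = mha(x, x, x)
print(y.shape)  # torch.Size([4, 4, 4]), same as the query input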
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_mul_0(in_ptr0, in_ptr1, out_ptr0, ynumel,
        xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)


@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_10, (4, 4), (4, 1))
    assert_size_stride(primals_11, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
        del primals_4
        buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
        del primals_7
        buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_mul_0[grid(16, 4)](buf0, primals_3, buf3,
            16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
        del primals_3
        buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
        del buf0
        triton_poi_fused_clone_1[grid(16, 4)](buf1, primals_5, buf4, 16,
            4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
        del primals_5
        buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
            0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf5
        triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del buf6
        buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf1
        triton_poi_fused_clone_1[grid(16, 4)](buf2, primals_8, buf8, 16,
            4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
        del primals_8
        buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
        del buf2
        extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
        buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
        del buf9
        extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (16, 4),
            (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf11)
        del primals_11
    return (reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0),
        reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
        reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
        reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), buf7,
        reinterpret_tensor(buf10, (16, 4), (4, 1), 0), primals_10,
        reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0))


class MultiHeadAttentionNew(nn.Module):

    def __init__(self, hidden_size, attention_dropout_rate, num_heads):
        super(MultiHeadAttentionNew, self).__init__()
        self.num_heads = num_heads
        self.att_size = att_size = hidden_size // num_heads
        self.scale = att_size ** -0.5
        self.linear_q = nn.Linear(hidden_size, num_heads * att_size)
        self.linear_k = nn.Linear(hidden_size, num_heads * att_size)
        self.linear_v = nn.Linear(hidden_size, num_heads * att_size)
        self.att_dropout = nn.Dropout(attention_dropout_rate)
        self.output_layer = nn.Linear(num_heads * att_size, hidden_size)

    def forward(self, input_0, input_1, input_2):
        primals_2 = self.linear_q.weight
        primals_3 = self.linear_q.bias
        primals_4 = self.linear_k.weight
        primals_5 = self.linear_k.bias
        primals_7 = self.linear_v.weight
        primals_8 = self.linear_v.bias
        primals_10 = self.output_layer.weight
        primals_11 = self.output_layer.bias
        primals_1 = input_0
        primals_6 = input_1
        primals_9 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0]
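A hedged parity sketch (my addition, not part of the dataset record). The generated call() applies no dropout, so the eager module should be compared in eval mode; input order is q, k, v in both versions.

# Hedged parity sketch; assumes CUDA and a compatible torch/triton build.
eager = MultiHeadAttention(4, 0.5, 4).cuda().eval()
compiled = MultiHeadAttentionNew(4, 0.5, 4).cuda().eval()
compiled.load_state_dict(eager.state_dict())
q = torch.rand(4, 4, 4, device='cuda')
k = torch.rand(4, 4, 4, device='cuda')
v = torch.rand(4, 4, 4, device='cuda')
assert torch.allclose(eager(q, k, v), compiled(q, k, v), atol=1e-5)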
Luo-Chang/Graphormer
MultiHeadAttention
false
5571
[ "MIT" ]
1
b35b3ca6369e25cdae80e1617bfc3921feeb3158
https://github.com/Luo-Chang/Graphormer/tree/b35b3ca6369e25cdae80e1617bfc3921feeb3158
import torch
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, hidden_size, attention_dropout_rate, num_heads):
        super().__init__()
        self.num_heads = num_heads
        self.att_size = att_size = hidden_size // num_heads
        self.scale = att_size ** -0.5
        self.linear_q = nn.Linear(hidden_size, num_heads * att_size)
        self.linear_k = nn.Linear(hidden_size, num_heads * att_size)
        self.linear_v = nn.Linear(hidden_size, num_heads * att_size)
        self.att_dropout = nn.Dropout(attention_dropout_rate)
        self.output_layer = nn.Linear(num_heads * att_size, hidden_size)

    def forward(self, q, k, v, attn_bias=None):
        orig_q_size = q.size()
        d_k = self.att_size
        d_v = self.att_size
        batch_size = q.size(0)
        q = self.linear_q(q).view(batch_size, -1, self.num_heads, d_k)
        k = self.linear_k(k).view(batch_size, -1, self.num_heads, d_k)
        v = self.linear_v(v).view(batch_size, -1, self.num_heads, d_v)
        q = q.transpose(1, 2)
        v = v.transpose(1, 2)
        k = k.transpose(1, 2).transpose(2, 3)
        q = q * self.scale
        x = torch.matmul(q, k)
        if attn_bias is not None:
            x = x + attn_bias
        x = torch.softmax(x, dim=3)
        x = self.att_dropout(x)
        x = x.matmul(v)
        x = x.transpose(1, 2).contiguous()
        x = x.view(batch_size, -1, self.num_heads * d_v)
        x = self.output_layer(x)
        assert x.size() == orig_q_size
        return x


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden_size': 4, 'attention_dropout_rate': 0.5,
        'num_heads': 4}]
Discrete
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/3n/c3nyedrc56xoj6pmjzzgnpithkx2vti6qsjnj43ybcoj67zutjs4.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [0], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 
= xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/y3/cy3yhdklte2jljt3jlkxw4g7pzz4g3oiwcgjauhd3xpun5n7blb6.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted 
Source Nodes: [softmax], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf0, buf1, 256, grid=grid(256), stream=stream0) del buf0 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
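The two kernels above are the standard numerically stable softmax split into two passes over the 256-element tensor: pass one subtracts the per-position max over dim 0 and exponentiates, pass two divides by the per-position sum. The same decomposition in plain PyTorch (a reference sketch, not the compiled code):

import torch

x = torch.rand(4, 4, 4, 4)
# Pass 1: subtract the max over dim 0 for stability, then exponentiate.
e = torch.exp(x - x.amax(dim=0, keepdim=True))
# Pass 2: normalize by the sum over dim 0.
y = e / e.sum(dim=0, keepdim=True)
print(torch.allclose(y, torch.softmax(x, dim=0), atol=1e-6))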
import torch import torch.nn as nn class Discrete(nn.Module): def __init__(self): super(Discrete, self).__init__() def forward(self, x): return nn.functional.softmax(x, dim=0) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 return buf1, class DiscreteNew(nn.Module): def __init__(self): super(DiscreteNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
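Note that dim=0 softmax normalizes across the leading axis rather than the usual last one, so each slice through dim 0 sums to 1; on a CUDA device, DiscreteNew()(x.cuda()) from the listing above should match this within float tolerance. A quick self-contained check of the normalization:

import torch

x = torch.rand(4, 4, 4, 4)
y = torch.nn.functional.softmax(x, dim=0)
# Softmax over dim 0: sums across the leading axis are all 1.
print(torch.allclose(y.sum(dim=0), torch.ones(4, 4, 4)))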
MPGek/client
Discrete
false
5,572
[ "Apache-2.0" ]
1
541d760c5cb8776b1ad5fcf1362d7382811cbc61
https://github.com/MPGek/client/tree/541d760c5cb8776b1ad5fcf1362d7382811cbc61
import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() def forward(self, x): return nn.functional.softmax(x, dim=0) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
AttentionPool2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ov/covbryzjnff2kb26c5gkcqbvct6kdwzanlx3iu6ee24itsit76o3.py # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] # Source node to ATen node mapping: # mean => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [-1], True), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), 
xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ct/cctgbe64jgxq3sxjjjqccvq653sunfecfcizp3jcofnl7uiib7wo.py # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.cat, aten.add] # Source node to ATen node mapping: # x_1 => cat # x_2 => add # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mean, %view], -1), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%cat, %unsqueeze), kwargs = {}) triton_poi_fused_add_cat_1 = async_compile.triton('triton_poi_fused_add_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 17 x3 = (xindex // 17) x4 = xindex % 68 x5 = xindex tmp15 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = 16.0 tmp7 = tmp5 / tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 17, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tl.load(in_ptr1 + ((16*x3) + ((-1) + x0)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + (x5), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/rq/crqhbpxwwpvdowuqzuzvjdahxv45tx2y4dpxda2rurvr5kralgbn.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, 0.7071067811865475), kwargs = {}) triton_poi_fused_mul_2 
= async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 68) x3 = xindex % 68 x1 = (xindex // 17) % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x3 + (204*x2)), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x4), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/cr/ccrjqh2olqyevqox4t3kpoubo2s2m44cuvtxmf2k3slvdcgmmlcm.py # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, 0.7071067811865475), kwargs = {}) triton_poi_fused_mul_3 = async_compile.triton('triton_poi_fused_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 68) x3 = xindex % 68 x1 = (xindex // 17) % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (68 + x3 + (204*x2)), xmask) tmp1 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x4), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/6p/c6p34hneq7lp7a3tjiwk44lxqe2hzbpdjgbgmanamv35xvznwb2j.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_6, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_6, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_4 = async_compile.triton('triton_per_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[128, 32], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_4(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 68 rnumel = 17 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 
(17*x0)), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + (17*x0)), tmp11, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/wf/cwf36kt6t5p6sv4fjknukcyy4vz6ejfamnreogfggthvquklzkf6.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_3 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add, %primals_3, %primals_4, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 17) % 12 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/oe/coet3g5rxo652nti4d4ogiwpej2mrpgh4cyidpv6aivegc4mda7s.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_5 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_11, %primals_5, %primals_6, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_6 = async_compile.triton('triton_poi_fused_convolution_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 32], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 17 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (68*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (17*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/uk/cuk32wvqx6lzak6biu2zut26pzpqa3ell4xsxs3qv3cpuuon7kkw.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_5 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_11, %primals_5, %primals_6, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_7 = async_compile.triton('triton_poi_fused_convolution_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 17) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 17), (17, 1)) assert_size_stride(primals_3, (12, 4, 1), (4, 1, 1)) assert_size_stride(primals_4, (12, ), (1, )) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(primals_1, buf0, 16, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.cat, aten.add] triton_poi_fused_add_cat_1.run(buf0, primals_1, primals_2, buf1, 272, grid=grid(272), stream=stream0) del buf0 del primals_1 del primals_2 # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 12, 17), (204, 17, 1)) buf3 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(buf2, primals_4, buf3, 272, grid=grid(272), stream=stream0) buf4 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] triton_poi_fused_mul_3.run(buf2, primals_4, buf4, 272, grid=grid(272), stream=stream0) buf5 = empty_strided_cuda((4, 17, 17), (289, 17, 1), torch.float32) # Topologically Sorted Source Nodes: [weight], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (4, 17, 4), (68, 1, 17), 0), buf4, out=buf5) buf8 = empty_strided_cuda((4, 17, 17), (289, 17, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_per_fused__softmax_4.run(buf5, buf8, 68, 17, grid=grid(68), stream=stream0) del buf5 buf9 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] triton_poi_fused_convolution_5.run(buf9, primals_4, 816, grid=grid(816), stream=stream0) del primals_4 buf10 = empty_strided_cuda((4, 17, 4), (68, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [a], Original ATen: [aten.bmm] extern_kernels.bmm(buf8, reinterpret_tensor(buf9, (4, 17, 4), (204, 1, 17), 136), out=buf10) buf11 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] 
triton_poi_fused_convolution_6.run(buf10, buf11, 16, 17, grid=grid(16, 17), stream=stream0) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf11, primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 17), (68, 17, 1)) del buf11 buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] triton_poi_fused_convolution_7.run(buf13, primals_6, 272, grid=grid(272), stream=stream0) del primals_6 return (reinterpret_tensor(buf13, (4, 4), (68, 17), 0), primals_3, primals_5, buf1, buf8, reinterpret_tensor(buf10, (4, 4, 17), (68, 1, 4), 0), reinterpret_tensor(buf9, (4, 4, 17), (204, 17, 1), 136), buf3, reinterpret_tensor(buf4, (4, 17, 4), (68, 1, 17), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 17), (17, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((12, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
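Two constants in the kernels above are worth decoding: xnumel = 272 = 4 * 4 * 17 because the pooled sequence has spacial_dim ** 2 + 1 = 17 tokens (16 spatial positions plus the prepended mean token), and 0.7071067811865475 = 1 / sqrt(sqrt(ch)) with ch = 4 channels per head, the factor QKVAttention applies to q and k separately. A quick check:

import math

ch = 4                                  # channels per attention head in this record
tokens = 4 ** 2 + 1                     # spacial_dim**2 spatial positions + 1 mean token
scale = 1 / math.sqrt(math.sqrt(ch))    # applied to q and to k before the einsum
print(tokens, scale)                    # 17 0.7071067811865475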
import math import torch import numpy as np import torch as th import torch.nn as nn def count_flops_attn(model, _x, y): """ A counter for the `thop` package to count the operations in an attention operation. Meant to be used like: macs, params = thop.profile( model, inputs=(inputs, timestamps), custom_ops={QKVAttention: QKVAttention.count_flops}, ) """ b, c, *spatial = y[0].shape num_spatial = int(np.prod(spatial)) matmul_ops = 2 * b * num_spatial ** 2 * c model.total_ops += th.DoubleTensor([matmul_ops]) def conv_nd(dims, *args, **kwargs): """ Create a 1D, 2D, or 3D convolution module. """ if dims == 1: return nn.Conv1d(*args, **kwargs) elif dims == 2: return nn.Conv2d(*args, **kwargs) elif dims == 3: return nn.Conv3d(*args, **kwargs) raise ValueError(f'unsupported dimensions: {dims}') class QKVAttention(nn.Module): """ A module which performs QKV attention and splits in a different order. """ def __init__(self, n_heads): super().__init__() self.n_heads = n_heads def forward(self, qkv): """ Apply QKV attention. :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. :return: an [N x (H * C) x T] tensor after attention. """ bs, width, length = qkv.shape assert width % (3 * self.n_heads) == 0 ch = width // (3 * self.n_heads) q, k, v = qkv.chunk(3, dim=1) scale = 1 / math.sqrt(math.sqrt(ch)) weight = th.einsum('bct,bcs->bts', (q * scale).view(bs * self. n_heads, ch, length), (k * scale).view(bs * self.n_heads, ch, length)) weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) a = th.einsum('bts,bcs->bct', weight, v.reshape(bs * self.n_heads, ch, length)) return a.reshape(bs, -1, length) @staticmethod def count_flops(model, _x, y): return count_flops_attn(model, _x, y) class AttentionPool2d(nn.Module): """ Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py """ def __init__(self, spacial_dim: 'int', embed_dim: 'int', num_heads_channels: 'int', output_dim: 'int'=None): super().__init__() self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5) self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) self.num_heads = embed_dim // num_heads_channels self.attention = QKVAttention(self.num_heads) def forward(self, x): b, c, *_spatial = x.shape x = x.reshape(b, c, -1) x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) x = x + self.positional_embedding[None, :, :] x = self.qkv_proj(x) x = self.attention(x) x = self.c_proj(x) return x[:, :, 0] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'spacial_dim': 4, 'embed_dim': 4, 'num_heads_channels': 4}]
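Typical usage of the module above: flatten an image feature map, prepend its mean as a pooled token, and attention-pool it down to one vector per image. A minimal sketch, assuming the AttentionPool2d definition from the listing above is in scope:

import torch

pool = AttentionPool2d(spacial_dim=4, embed_dim=4, num_heads_channels=4)
feats = torch.rand(4, 4, 4, 4)          # (B, C, H, W) with H == W == spacial_dim
pooled = pool(feats)
print(pooled.shape)                     # torch.Size([4, 4]): one embed_dim vector per image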
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import numpy as np import torch as th import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_add_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 17 x3 = xindex // 17 x4 = xindex % 68 x5 = xindex tmp15 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x3, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = 16.0 tmp7 = tmp5 / tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 17, tl.int64) tmp13 = tl.load(in_ptr1 + (16 * x3 + (-1 + x0)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + x5, tmp16, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 68 x3 = xindex % 68 x1 = xindex // 17 % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x3 + 204 * x2), xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 68 x3 = xindex % 68 x1 = xindex // 17 % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (68 + x3 + 204 * x2), xmask) tmp1 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_per_fused__softmax_4(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 68 rnumel = 17 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 17 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + 17 * x0), tmp11, rmask & xmask) @triton.jit def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 17 % 12 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 17 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 68 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 17 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 17 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 17), (17, 1)) assert_size_stride(primals_3, (12, 4, 1), (4, 1, 1)) assert_size_stride(primals_4, (12,), (1,)) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_per_fused_mean_0[grid(16)](primals_1, buf0, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf1 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32) triton_poi_fused_add_cat_1[grid(272)](buf0, primals_1, primals_2, buf1, 272, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_1 del primals_2 buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1,), 
padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 12, 17), (204, 17, 1)) buf3 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32) triton_poi_fused_mul_2[grid(272)](buf2, primals_4, buf3, 272, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32) triton_poi_fused_mul_3[grid(272)](buf2, primals_4, buf4, 272, XBLOCK=256, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((4, 17, 17), (289, 17, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (4, 17, 4), (68, 1, 17), 0), buf4, out=buf5) buf8 = empty_strided_cuda((4, 17, 17), (289, 17, 1), torch.float32) triton_per_fused__softmax_4[grid(68)](buf5, buf8, 68, 17, XBLOCK=8, num_warps=2, num_stages=1) del buf5 buf9 = buf2 del buf2 triton_poi_fused_convolution_5[grid(816)](buf9, primals_4, 816, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 buf10 = empty_strided_cuda((4, 17, 4), (68, 4, 1), torch.float32) extern_kernels.bmm(buf8, reinterpret_tensor(buf9, (4, 17, 4), (204, 1, 17), 136), out=buf10) buf11 = empty_strided_cuda((4, 4, 17), (68, 17, 1), torch.float32) triton_poi_fused_convolution_6[grid(16, 17)](buf10, buf11, 16, 17, XBLOCK=32, YBLOCK=16, num_warps=4, num_stages=1) buf12 = extern_kernels.convolution(buf11, primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 17), (68, 17, 1)) del buf11 buf13 = buf12 del buf12 triton_poi_fused_convolution_7[grid(272)](buf13, primals_6, 272, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 return reinterpret_tensor(buf13, (4, 4), (68, 17), 0 ), primals_3, primals_5, buf1, buf8, reinterpret_tensor(buf10, (4, 4, 17), (68, 1, 4), 0), reinterpret_tensor(buf9, (4, 4, 17), (204, 17, 1), 136), buf3, reinterpret_tensor(buf4, (4, 17, 4), (68, 1, 17), 0 ) def count_flops_attn(model, _x, y): """ A counter for the `thop` package to count the operations in an attention operation. Meant to be used like: macs, params = thop.profile( model, inputs=(inputs, timestamps), custom_ops={QKVAttention: QKVAttention.count_flops}, ) """ b, c, *spatial = y[0].shape num_spatial = int(np.prod(spatial)) matmul_ops = 2 * b * num_spatial ** 2 * c model.total_ops += th.DoubleTensor([matmul_ops]) def conv_nd(dims, *args, **kwargs): """ Create a 1D, 2D, or 3D convolution module. """ if dims == 1: return nn.Conv1d(*args, **kwargs) elif dims == 2: return nn.Conv2d(*args, **kwargs) elif dims == 3: return nn.Conv3d(*args, **kwargs) raise ValueError(f'unsupported dimensions: {dims}') class QKVAttention(nn.Module): """ A module which performs QKV attention and splits in a different order. """ def __init__(self, n_heads): super().__init__() self.n_heads = n_heads def forward(self, qkv): """ Apply QKV attention. :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. :return: an [N x (H * C) x T] tensor after attention. """ bs, width, length = qkv.shape assert width % (3 * self.n_heads) == 0 ch = width // (3 * self.n_heads) q, k, v = qkv.chunk(3, dim=1) scale = 1 / math.sqrt(math.sqrt(ch)) weight = th.einsum('bct,bcs->bts', (q * scale).view(bs * self. 
n_heads, ch, length), (k * scale).view(bs * self.n_heads, ch, length)) weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) a = th.einsum('bts,bcs->bct', weight, v.reshape(bs * self.n_heads, ch, length)) return a.reshape(bs, -1, length) @staticmethod def count_flops(model, _x, y): return count_flops_attn(model, _x, y) class AttentionPool2dNew(nn.Module): """ Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py """ def __init__(self, spacial_dim: 'int', embed_dim: 'int', num_heads_channels: 'int', output_dim: 'int'=None): super().__init__() self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5) self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) self.num_heads = embed_dim // num_heads_channels self.attention = QKVAttention(self.num_heads) def forward(self, input_0): primals_2 = self.positional_embedding primals_3 = self.qkv_proj.weight primals_4 = self.qkv_proj.bias primals_5 = self.c_proj.weight primals_6 = self.c_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
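The compiled call returns reinterpret_tensor(buf13, (4, 4), (68, 17), 0) rather than materializing x[:, :, 0]: selecting index 0 of the last axis of a contiguous (4, 4, 17) tensor is just a strided view with strides (68, 17) and offset 0. A self-contained illustration of the same trick with as_strided:

import torch

y = torch.rand(4, 4, 17)                              # contiguous, strides (68, 17, 1)
view = torch.as_strided(y, size=(4, 4), stride=(68, 17))
print(torch.equal(view, y[:, :, 0]))                  # True: same memory, no copy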
Liujingxiu23/guided-diffusion
AttentionPool2d
false
5,573
[ "MIT" ]
1
0ba878e517b276c45d1195eb29f6f5f72659a05b
https://github.com/Liujingxiu23/guided-diffusion/tree/0ba878e517b276c45d1195eb29f6f5f72659a05b
import math import torch import numpy as np import torch as th import torch.nn as nn def count_flops_attn(model, _x, y): """ A counter for the `thop` package to count the operations in an attention operation. Meant to be used like: macs, params = thop.profile( model, inputs=(inputs, timestamps), custom_ops={QKVAttention: QKVAttention.count_flops}, ) """ b, c, *spatial = y[0].shape num_spatial = int(np.prod(spatial)) matmul_ops = 2 * b * num_spatial ** 2 * c model.total_ops += th.DoubleTensor([matmul_ops]) def conv_nd(dims, *args, **kwargs): """ Create a 1D, 2D, or 3D convolution module. """ if dims == 1: return nn.Conv1d(*args, **kwargs) elif dims == 2: return nn.Conv2d(*args, **kwargs) elif dims == 3: return nn.Conv3d(*args, **kwargs) raise ValueError(f'unsupported dimensions: {dims}') class QKVAttention(nn.Module): """ A module which performs QKV attention and splits in a different order. """ def __init__(self, n_heads): super().__init__() self.n_heads = n_heads def forward(self, qkv): """ Apply QKV attention. :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. :return: an [N x (H * C) x T] tensor after attention. """ bs, width, length = qkv.shape assert width % (3 * self.n_heads) == 0 ch = width // (3 * self.n_heads) q, k, v = qkv.chunk(3, dim=1) scale = 1 / math.sqrt(math.sqrt(ch)) weight = th.einsum('bct,bcs->bts', (q * scale).view(bs * self. n_heads, ch, length), (k * scale).view(bs * self.n_heads, ch, length)) weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) a = th.einsum('bts,bcs->bct', weight, v.reshape(bs * self.n_heads, ch, length)) return a.reshape(bs, -1, length) @staticmethod def count_flops(model, _x, y): return count_flops_attn(model, _x, y) class Model(nn.Module): """ Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py """ def __init__(self, spacial_dim: 'int', embed_dim: 'int', num_heads_channels: 'int', output_dim: 'int'=None): super().__init__() self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5) self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) self.num_heads = embed_dim // num_heads_channels self.attention = QKVAttention(self.num_heads) def forward(self, x): b, c, *_spatial = x.shape x = x.reshape(b, c, -1) x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) x = x + self.positional_embedding[None, :, :] x = self.qkv_proj(x) x = self.attention(x) x = self.c_proj(x) return x[:, :, 0] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
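One detail of QKVAttention above: it multiplies q and k each by 1 / sqrt(sqrt(ch)) before the einsum instead of dividing the logits by sqrt(ch) afterwards. The two are algebraically identical, but splitting the factor keeps the pre-softmax intermediates smaller, which is gentler in low precision. A quick check of the identity:

import math
import torch

ch = 4
q = torch.randn(2, ch, 8)
k = torch.randn(2, ch, 8)
s = 1 / math.sqrt(math.sqrt(ch))
a = torch.einsum('bct,bcs->bts', q * s, k * s)
b = torch.einsum('bct,bcs->bts', q, k) / math.sqrt(ch)
print(torch.allclose(a, b, atol=1e-6))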
CrossEntropyDiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/34/c34rlteenetjierkjaoyb2s72hhjrg66q5se6frawcegdjakljxp.py # Topologically Sorted Source Nodes: [tp, tp_1, tp_2, tp_3, mul_3, mul_4, sub, fn, fn_1, fn_2, mul_5, add, sub_1, fp, fp_1, fp_2, add_1, add_2, truediv, result, result_1], Original ATen: [aten.mul, aten.sum, aten.add, aten.rsub, aten.div, aten.mean] # Source node to ATen node mapping: # add => add_1 # add_1 => add_2 # add_2 => add_3 # fn => mul_1 # fn_1 => sum_2 # fn_2 => sum_5 # fp => mul_2 # fp_1 => sum_3 # fp_2 => sum_6 # mul_3 => mul_3 # mul_4 => mul_4 # mul_5 => mul_5 # result => sub_2 # result_1 => mean # sub => sub # sub_1 => sub_1 # tp => mul # tp_1 => sum_1 # tp_2 => sum_4 # tp_3 => add # truediv => div # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_1, [-1]), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_4, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 2.0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 2.0), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg0_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [-1]), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_2, [-1]), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_5, 1.0), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %mul_5), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = 
(%arg1_1, %sub_1), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [-1]), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_3, [-1]), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %sum_6), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, 1e-06), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_3, %add_3), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {}) triton_per_fused_add_div_mean_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_mean_mul_rsub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_rsub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 32, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_mul_rsub_sum_0(in_ptr0, in_ptr1, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (16*r0), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (16*r0), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (16*r0)), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (16*r0)), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (16*r0)), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (16*r0)), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (16*r0)), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (16*r0)), None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (4 + (16*r0)), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (4 + (16*r0)), None, 
eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (5 + (16*r0)), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (5 + (16*r0)), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr0 + (6 + (16*r0)), None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (6 + (16*r0)), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr0 + (7 + (16*r0)), None, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (7 + (16*r0)), None, eviction_policy='evict_last') tmp31 = tl.load(in_ptr0 + (8 + (16*r0)), None, eviction_policy='evict_last') tmp32 = tl.load(in_ptr1 + (8 + (16*r0)), None, eviction_policy='evict_last') tmp34 = tl.load(in_ptr0 + (9 + (16*r0)), None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr1 + (9 + (16*r0)), None, eviction_policy='evict_last') tmp38 = tl.load(in_ptr0 + (10 + (16*r0)), None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr1 + (10 + (16*r0)), None, eviction_policy='evict_last') tmp42 = tl.load(in_ptr0 + (11 + (16*r0)), None, eviction_policy='evict_last') tmp43 = tl.load(in_ptr1 + (11 + (16*r0)), None, eviction_policy='evict_last') tmp47 = tl.load(in_ptr0 + (12 + (16*r0)), None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr1 + (12 + (16*r0)), None, eviction_policy='evict_last') tmp50 = tl.load(in_ptr0 + (13 + (16*r0)), None, eviction_policy='evict_last') tmp51 = tl.load(in_ptr1 + (13 + (16*r0)), None, eviction_policy='evict_last') tmp54 = tl.load(in_ptr0 + (14 + (16*r0)), None, eviction_policy='evict_last') tmp55 = tl.load(in_ptr1 + (14 + (16*r0)), None, eviction_policy='evict_last') tmp58 = tl.load(in_ptr0 + (15 + (16*r0)), None, eviction_policy='evict_last') tmp59 = tl.load(in_ptr1 + (15 + (16*r0)), None, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp17 = tmp15 * tmp16 tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tmp24 = tmp22 * tmp23 tmp25 = tmp21 + tmp24 tmp28 = tmp26 * tmp27 tmp29 = tmp25 + tmp28 tmp30 = tmp14 + tmp29 tmp33 = tmp31 * tmp32 tmp36 = tmp34 * tmp35 tmp37 = tmp33 + tmp36 tmp40 = tmp38 * tmp39 tmp41 = tmp37 + tmp40 tmp44 = tmp42 * tmp43 tmp45 = tmp41 + tmp44 tmp46 = tmp30 + tmp45 tmp49 = tmp47 * tmp48 tmp52 = tmp50 * tmp51 tmp53 = tmp49 + tmp52 tmp56 = tmp54 * tmp55 tmp57 = tmp53 + tmp56 tmp60 = tmp58 * tmp59 tmp61 = tmp57 + tmp60 tmp62 = tmp46 + tmp61 tmp63 = 1.0 tmp64 = tmp63 - tmp0 tmp65 = tmp64 * tmp1 tmp66 = tmp63 - tmp3 tmp67 = tmp66 * tmp4 tmp68 = tmp65 + tmp67 tmp69 = tmp63 - tmp7 tmp70 = tmp69 * tmp8 tmp71 = tmp68 + tmp70 tmp72 = tmp63 - tmp11 tmp73 = tmp72 * tmp12 tmp74 = tmp71 + tmp73 tmp75 = tmp63 - tmp15 tmp76 = tmp75 * tmp16 tmp77 = tmp63 - tmp18 tmp78 = tmp77 * tmp19 tmp79 = tmp76 + tmp78 tmp80 = tmp63 - tmp22 tmp81 = tmp80 * tmp23 tmp82 = tmp79 + tmp81 tmp83 = tmp63 - tmp26 tmp84 = tmp83 * tmp27 tmp85 = tmp82 + tmp84 tmp86 = tmp74 + tmp85 tmp87 = tmp63 - tmp31 tmp88 = tmp87 * tmp32 tmp89 = tmp63 - tmp34 tmp90 = tmp89 * tmp35 tmp91 = tmp88 + tmp90 tmp92 = tmp63 - tmp38 tmp93 = tmp92 * tmp39 tmp94 = tmp91 + tmp93 tmp95 = tmp63 - tmp42 tmp96 = tmp95 * tmp43 tmp97 = tmp94 + tmp96 tmp98 = tmp86 + tmp97 tmp99 = tmp63 - tmp47 tmp100 = tmp99 * tmp48 tmp101 = tmp63 - tmp50 tmp102 = tmp101 * tmp51 tmp103 = tmp100 + tmp102 tmp104 = tmp63 - tmp54 tmp105 = tmp104 * tmp55 tmp106 = tmp103 + tmp105 tmp107 = tmp63 - tmp58 tmp108 = tmp107 * tmp59 tmp109 = tmp106 + tmp108 tmp110 = tmp98 + tmp109 tmp111 = tmp63 - tmp1 tmp112 = tmp0 * tmp111 tmp113 = tmp63 - tmp4 tmp114 = tmp3 * tmp113 
tmp115 = tmp112 + tmp114 tmp116 = tmp63 - tmp8 tmp117 = tmp7 * tmp116 tmp118 = tmp115 + tmp117 tmp119 = tmp63 - tmp12 tmp120 = tmp11 * tmp119 tmp121 = tmp118 + tmp120 tmp122 = tmp63 - tmp16 tmp123 = tmp15 * tmp122 tmp124 = tmp63 - tmp19 tmp125 = tmp18 * tmp124 tmp126 = tmp123 + tmp125 tmp127 = tmp63 - tmp23 tmp128 = tmp22 * tmp127 tmp129 = tmp126 + tmp128 tmp130 = tmp63 - tmp27 tmp131 = tmp26 * tmp130 tmp132 = tmp129 + tmp131 tmp133 = tmp121 + tmp132 tmp134 = tmp63 - tmp32 tmp135 = tmp31 * tmp134 tmp136 = tmp63 - tmp35 tmp137 = tmp34 * tmp136 tmp138 = tmp135 + tmp137 tmp139 = tmp63 - tmp39 tmp140 = tmp38 * tmp139 tmp141 = tmp138 + tmp140 tmp142 = tmp63 - tmp43 tmp143 = tmp42 * tmp142 tmp144 = tmp141 + tmp143 tmp145 = tmp133 + tmp144 tmp146 = tmp63 - tmp48 tmp147 = tmp47 * tmp146 tmp148 = tmp63 - tmp51 tmp149 = tmp50 * tmp148 tmp150 = tmp147 + tmp149 tmp151 = tmp63 - tmp55 tmp152 = tmp54 * tmp151 tmp153 = tmp150 + tmp152 tmp154 = tmp63 - tmp59 tmp155 = tmp58 * tmp154 tmp156 = tmp153 + tmp155 tmp157 = tmp145 + tmp156 tmp158 = 0.0 tmp159 = tmp62 + tmp158 tmp160 = 2.0 tmp161 = tmp159 * tmp160 tmp162 = tmp110 * tmp63 tmp163 = tmp161 + tmp162 tmp164 = tmp163 + tmp157 tmp165 = 1e-06 tmp166 = tmp164 + tmp165 tmp167 = tmp161 / tmp166 tmp168 = tmp63 - tmp167 tmp169 = tl.broadcast_to(tmp168, [XBLOCK, RBLOCK]) tmp171 = tl.sum(tmp169, 1)[:, None] tl.store(out_ptr3 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp171, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ji/cjiatqqtn7v7exy34pj4bpfh6qxtkb2r465o4j2gx3br3ej5xjgr.py # Topologically Sorted Source Nodes: [tp_3, mul_3, mul_4, mul_5, add, add_1, add_2, truediv, result, result_1, mul_6, target_1, ce, mul_7, add_4], Original ATen: [aten.add, aten.mul, aten.div, aten.rsub, aten.mean, aten.argmax, aten.nll_loss2d_forward] # Source node to ATen node mapping: # add => add_1 # add_1 => add_2 # add_2 => add_3 # add_4 => add_5 # ce => convert_element_type, div_1, full_default_1, ne_1, ne_2, neg, sum_7, sum_8, where_1 # mul_3 => mul_3 # mul_4 => mul_4 # mul_5 => mul_5 # mul_6 => mul_6 # mul_7 => mul_7 # result => sub_2 # result_1 => mean # target_1 => argmax # tp_3 => add # truediv => div # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_4, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 2.0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 2.0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_5, 1.0), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %mul_5), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %sum_6), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, 1e-06), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_3, %add_3), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 0.5), kwargs = {}) # %argmax : [num_users=4] = call_function[target=torch.ops.aten.argmax.default](args = (%arg0_1, 1), kwargs = {}) # %ne_1 : [num_users=1] = 
call_function[target=torch.ops.aten.ne.Scalar](args = (%argmax, -100), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%squeeze,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%ne_1, %neg, %full_default_1), kwargs = {}) # %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%where_1,), kwargs = {}) # %ne_2 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%argmax, -100), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%ne_2,), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%sum_7, torch.float32), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_8, %convert_element_type), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, 0.5), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %mul_7), kwargs = {}) triton_per_fused_add_argmax_div_mean_mul_nll_loss2d_forward_rsub_1 = async_compile.triton('triton_per_fused_add_argmax_div_mean_mul_nll_loss2d_forward_rsub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_argmax_div_mean_mul_nll_loss2d_forward_rsub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_argmax_div_mean_mul_nll_loss2d_forward_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp1 = tl.load(in_ptr0 
+ (16 + r0 + (64*r1)), None) tmp17 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp32 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp69 = tl.load(in_out_ptr0 + (0)) tmp70 = tl.broadcast_to(tmp69, [XBLOCK, 1]) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1, 1], 0, tl.int64) tmp11 = tl.full([1, 1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1, 1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1, 1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tmp45 = tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tmp47 = tl.full([1, 1], -100, tl.int64) tmp48 = tmp46 != tmp47 tmp49 = tl.where(tmp48, tmp46, tmp10) tmp50 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp51 = tmp49 + tmp50 tmp52 = tmp49 < 0 tmp53 = tl.where(tmp52, tmp51, tmp49) tl.device_assert((0 <= tmp53) & (tmp53 < 4), "index out of bounds: 0 <= tmp53 < 4") tmp55 = tl.load(in_ptr1 + (r0 + (16*tmp53) + (64*r1)), None) tmp56 = 1e-06 tmp57 = tmp55 + tmp56 tmp58 = tl_math.log(tmp57) tmp59 = -tmp58 tmp60 = 0.0 tmp61 = tl.where(tmp48, tmp59, tmp60) tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK]) tmp64 = tl.sum(tmp62, 1)[:, None] tmp65 = tmp48.to(tl.int64) tmp66 = tl.broadcast_to(tmp65, [XBLOCK, RBLOCK]) tmp68 = tl.sum(tmp66, 1)[:, None] tmp71 = 16.0 tmp72 = tmp70 / tmp71 tmp73 = 0.5 tmp74 = tmp72 * tmp73 tmp75 = tmp68.to(tl.float32) tmp76 = tmp64 / tmp75 tmp77 = tmp76 * tmp73 tmp78 = tmp74 + tmp77 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp78, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [tp, tp_1, tp_2, tp_3, mul_3, mul_4, sub, fn, fn_1, fn_2, mul_5, add, sub_1, fp, fp_1, fp_2, add_1, add_2, truediv, result, result_1], Original ATen: [aten.mul, aten.sum, aten.add, aten.rsub, aten.div, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_div_mean_mul_rsub_sum_0.run(arg1_1, arg0_1, buf3, 1, 16, grid=grid(1), stream=stream0) buf7 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [tp_3, mul_3, mul_4, mul_5, add, add_1, add_2, truediv, result, result_1, mul_6, target_1, ce, mul_7, add_4], Original ATen: [aten.add, aten.mul, aten.div, aten.rsub, aten.mean, aten.argmax, aten.nll_loss2d_forward] triton_per_fused_add_argmax_div_mean_mul_nll_loss2d_forward_rsub_1.run(buf7, arg0_1, arg1_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils 
import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
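For readability, here is a hedged eager-mode sketch of what the two fused kernels above compute for the (4, 4, 4, 4) inputs: the first kernel accumulates the soft-Dice term over the two trailing dimensions, and the second recovers a hard target via channel argmax and adds the NLL term on log(input + eps). Everything below is reconstructed from the kernel bodies; fused_reference is an assumed name, not part of the generated module.

import torch
import torch.nn.functional as F

def fused_reference(input_, target, eps=1e-06, ce_weight=0.5):
    # Kernel 0: soft Dice with beta=1, summed over the two trailing dims, averaged over (B, C)
    tp = (input_ * target).sum(-1).sum(-1)
    fn = ((1 - input_) * target).sum(-1).sum(-1)
    fp = (input_ * (1 - target)).sum(-1).sum(-1)
    dice = (1 - 2.0 * tp / (2.0 * tp + fn + fp + eps)).mean()
    # Kernel 1: hard target via channel argmax, then NLL on log(input + eps)
    hard_target = torch.argmax(target, dim=1)
    ce = F.nll_loss(torch.log(input_ + eps), hard_target)
    return (1 - ce_weight) * dice + ce_weight * ce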
import torch from typing import Union from typing import Optional from typing import Iterable from torch import nn class FScoreLoss(nn.modules.loss._WeightedLoss): """Uses the 1 - F-score as a loss. .. math:: F = \frac{ (1 + \beta^2) TP }{ (1 + \beta^2) TP + \beta^2 FN + FP } Args: beta: The beta in the above formula. eps: Epsilon for numerical stability. tp_bias: This is added to the TP count. Should add a little stability for very small structures. make_onehot: Convert the target segmentation to onehot internally. Turn this off if the target is already onehot. make_onehot_newaxis: 'newaxis' argument for the onehot conversion. ignore_index: These classes will not contribute to the loss. Has no effect if reduction is "none". weight: Weights for the different classes. Has no effect if reduction is "none". reduction: "mean", "sum", or "none". """ def __init__(self, beta: 'float'=1.0, eps: 'float'=1e-06, tp_bias: 'Union[int, float]'=0, make_onehot: 'bool'=False, make_onehot_newaxis: 'bool'=False, ignore_index: 'Optional[Union[int, Iterable[int]]]'=None, weight: 'Optional[torch.Tensor]'=None, reduction='mean', **kwargs): super(FScoreLoss, self).__init__(weight=weight, reduction=reduction) self.beta = beta self.eps = eps self.tp_bias = tp_bias self.make_onehot = make_onehot self.make_onehot_newaxis = make_onehot_newaxis self.ignore_index = ignore_index if isinstance(ignore_index, int): self.ignore_index = [ignore_index] def forward(self, input_: 'torch.Tensor', target: 'torch.Tensor' ) ->torch.Tensor: """Forward pass through the loss. Args: input_: Input with expected shape (B, C, ...) with C the number of classes. target: Target, either onehot with shape (B, C, ...), or not with shape either (B, ...) or (B, 1, ...). Make sure to set the make_onehot and make_onehot_newaxis arguments appropriately. Returns: The loss. """ assert not target.requires_grad if self.make_onehot: target = make_onehot_segmentation(target, range(input_.shape[1] ), newaxis=self.make_onehot_newaxis) target = target.float() tp = input_ * target fn = (1 - input_) * target fp = input_ * (1 - target) while tp.dim() > 2: tp = tp.sum(-1, keepdim=False) fn = fn.sum(-1, keepdim=False) fp = fp.sum(-1, keepdim=False) tp += self.tp_bias result = 1 - (1 + self.beta * self.beta) * tp / ((1 + self.beta * self.beta) * tp + self.beta * self.beta * fn + fp + self.eps) if self.reduction != 'none': if self.weight is not None: self.weight = self.weight.float() self.weight = self.weight weight = self.weight.expand_as(result) result = result * weight if self.ignore_index is not None: for cls in sorted(self.ignore_index, reverse=True): if cls == result.shape[1] - 1: result = result[:, :-1] elif cls == 0: result = result[:, 1:] else: result = torch.cat([result[:, :cls], result[:, cls + 1:]], 1) if self.reduction == 'mean': result = torch.mean(result) elif self.reduction == 'sum': result = torch.sum(result) else: raise ValueError( "reduction must be 'none', 'mean' or 'sum', but is {}". format(self.reduction)) return result class DiceLoss(FScoreLoss): """FScoreLoss with beta=1.""" def __init__(self, **kwargs): kwargs['beta'] = 1.0 super(DiceLoss, self).__init__(**kwargs) def forward(self, input_, target): return super(DiceLoss, self).forward(input_, target) class CrossEntropyDiceLoss(DiceLoss): """Weighted sum of CE and Dice losses. Expects softmax inputs! Args: ce_weight: The weight for the CE loss. Weight for the Dice loss will be 1 - ce_weight.
""" def __init__(self, ce_weight: 'float'=0.5, *args, **kwargs): super().__init__(*args, **kwargs) self.ce_weight = ce_weight self.ce = nn.NLLLoss(weight=kwargs.get('weight', None)) def forward(self, input_: 'torch.Tensor', target: 'torch.Tensor' ) ->torch.Tensor: """Forward pass through the loss. Args: input_: Input with expected shape (B, C, ...) with C the number of classes. target: Target, either onehot with shape (B, C, ...), or not with shape either (B, ...) or (B, 1, ...). Make sure to set the make_onehot and make_onehot_newaxis arguments appropriately. Returns: The loss. """ dice = super().forward(input_, target) if self.make_onehot: if self.make_onehot_newaxis: pass else: target = target.squeeze() else: target = torch.argmax(target, 1, keepdim=False) target = target.long() ce = self.ce(torch.log(input_ + self.eps), target) return (1 - self.ce_weight) * dice + self.ce_weight * ce def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from typing import Union from typing import Optional from typing import Iterable from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mean_mul_rsub_sum_0(in_ptr0, in_ptr1, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 16 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 16 * r0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 16 * r0), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 16 * r0), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 16 * r0), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 16 * r0), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 16 * r0), None, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 16 * r0), None, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (4 + 16 * r0), None, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr1 + (4 + 16 * r0), None, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (5 + 16 * r0), None, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (5 + 16 * r0), None, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr0 + (6 + 16 * r0), None, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr1 + (6 + 16 * r0), None, eviction_policy='evict_last' ) tmp26 = tl.load(in_ptr0 + (7 + 16 * r0), None, eviction_policy='evict_last' ) tmp27 = tl.load(in_ptr1 + (7 + 16 * r0), None, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr0 + (8 + 16 * r0), None, eviction_policy='evict_last' ) tmp32 = tl.load(in_ptr1 + (8 + 16 * r0), None, eviction_policy='evict_last' ) tmp34 = tl.load(in_ptr0 + (9 + 16 * r0), None, eviction_policy='evict_last' ) tmp35 = tl.load(in_ptr1 + (9 + 16 * r0), None, eviction_policy='evict_last' ) tmp38 = tl.load(in_ptr0 + (10 + 16 * r0), None, eviction_policy= 'evict_last') tmp39 = tl.load(in_ptr1 + (10 + 16 * r0), None, eviction_policy= 'evict_last') tmp42 = tl.load(in_ptr0 + (11 + 16 * r0), None, eviction_policy= 'evict_last') tmp43 = tl.load(in_ptr1 + (11 + 16 * r0), None, eviction_policy= 'evict_last') tmp47 = tl.load(in_ptr0 + (12 + 16 * r0), None, eviction_policy= 'evict_last') tmp48 = tl.load(in_ptr1 + (12 + 16 * r0), None, eviction_policy= 'evict_last') tmp50 = tl.load(in_ptr0 + (13 + 16 * r0), None, eviction_policy= 'evict_last') tmp51 = tl.load(in_ptr1 + (13 + 16 * r0), None, eviction_policy= 'evict_last') tmp54 = tl.load(in_ptr0 + (14 + 16 * r0), None, eviction_policy= 'evict_last') tmp55 = tl.load(in_ptr1 + (14 + 16 * r0), None, eviction_policy= 'evict_last') tmp58 = tl.load(in_ptr0 + (15 + 16 * r0), None, eviction_policy= 'evict_last') tmp59 = tl.load(in_ptr1 + (15 + 16 * r0), None, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp17 = tmp15 * tmp16 tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tmp24 = tmp22 * tmp23 
tmp25 = tmp21 + tmp24 tmp28 = tmp26 * tmp27 tmp29 = tmp25 + tmp28 tmp30 = tmp14 + tmp29 tmp33 = tmp31 * tmp32 tmp36 = tmp34 * tmp35 tmp37 = tmp33 + tmp36 tmp40 = tmp38 * tmp39 tmp41 = tmp37 + tmp40 tmp44 = tmp42 * tmp43 tmp45 = tmp41 + tmp44 tmp46 = tmp30 + tmp45 tmp49 = tmp47 * tmp48 tmp52 = tmp50 * tmp51 tmp53 = tmp49 + tmp52 tmp56 = tmp54 * tmp55 tmp57 = tmp53 + tmp56 tmp60 = tmp58 * tmp59 tmp61 = tmp57 + tmp60 tmp62 = tmp46 + tmp61 tmp63 = 1.0 tmp64 = tmp63 - tmp0 tmp65 = tmp64 * tmp1 tmp66 = tmp63 - tmp3 tmp67 = tmp66 * tmp4 tmp68 = tmp65 + tmp67 tmp69 = tmp63 - tmp7 tmp70 = tmp69 * tmp8 tmp71 = tmp68 + tmp70 tmp72 = tmp63 - tmp11 tmp73 = tmp72 * tmp12 tmp74 = tmp71 + tmp73 tmp75 = tmp63 - tmp15 tmp76 = tmp75 * tmp16 tmp77 = tmp63 - tmp18 tmp78 = tmp77 * tmp19 tmp79 = tmp76 + tmp78 tmp80 = tmp63 - tmp22 tmp81 = tmp80 * tmp23 tmp82 = tmp79 + tmp81 tmp83 = tmp63 - tmp26 tmp84 = tmp83 * tmp27 tmp85 = tmp82 + tmp84 tmp86 = tmp74 + tmp85 tmp87 = tmp63 - tmp31 tmp88 = tmp87 * tmp32 tmp89 = tmp63 - tmp34 tmp90 = tmp89 * tmp35 tmp91 = tmp88 + tmp90 tmp92 = tmp63 - tmp38 tmp93 = tmp92 * tmp39 tmp94 = tmp91 + tmp93 tmp95 = tmp63 - tmp42 tmp96 = tmp95 * tmp43 tmp97 = tmp94 + tmp96 tmp98 = tmp86 + tmp97 tmp99 = tmp63 - tmp47 tmp100 = tmp99 * tmp48 tmp101 = tmp63 - tmp50 tmp102 = tmp101 * tmp51 tmp103 = tmp100 + tmp102 tmp104 = tmp63 - tmp54 tmp105 = tmp104 * tmp55 tmp106 = tmp103 + tmp105 tmp107 = tmp63 - tmp58 tmp108 = tmp107 * tmp59 tmp109 = tmp106 + tmp108 tmp110 = tmp98 + tmp109 tmp111 = tmp63 - tmp1 tmp112 = tmp0 * tmp111 tmp113 = tmp63 - tmp4 tmp114 = tmp3 * tmp113 tmp115 = tmp112 + tmp114 tmp116 = tmp63 - tmp8 tmp117 = tmp7 * tmp116 tmp118 = tmp115 + tmp117 tmp119 = tmp63 - tmp12 tmp120 = tmp11 * tmp119 tmp121 = tmp118 + tmp120 tmp122 = tmp63 - tmp16 tmp123 = tmp15 * tmp122 tmp124 = tmp63 - tmp19 tmp125 = tmp18 * tmp124 tmp126 = tmp123 + tmp125 tmp127 = tmp63 - tmp23 tmp128 = tmp22 * tmp127 tmp129 = tmp126 + tmp128 tmp130 = tmp63 - tmp27 tmp131 = tmp26 * tmp130 tmp132 = tmp129 + tmp131 tmp133 = tmp121 + tmp132 tmp134 = tmp63 - tmp32 tmp135 = tmp31 * tmp134 tmp136 = tmp63 - tmp35 tmp137 = tmp34 * tmp136 tmp138 = tmp135 + tmp137 tmp139 = tmp63 - tmp39 tmp140 = tmp38 * tmp139 tmp141 = tmp138 + tmp140 tmp142 = tmp63 - tmp43 tmp143 = tmp42 * tmp142 tmp144 = tmp141 + tmp143 tmp145 = tmp133 + tmp144 tmp146 = tmp63 - tmp48 tmp147 = tmp47 * tmp146 tmp148 = tmp63 - tmp51 tmp149 = tmp50 * tmp148 tmp150 = tmp147 + tmp149 tmp151 = tmp63 - tmp55 tmp152 = tmp54 * tmp151 tmp153 = tmp150 + tmp152 tmp154 = tmp63 - tmp59 tmp155 = tmp58 * tmp154 tmp156 = tmp153 + tmp155 tmp157 = tmp145 + tmp156 tmp158 = 0.0 tmp159 = tmp62 + tmp158 tmp160 = 2.0 tmp161 = tmp159 * tmp160 tmp162 = tmp110 * tmp63 tmp163 = tmp161 + tmp162 tmp164 = tmp163 + tmp157 tmp165 = 1e-06 tmp166 = tmp164 + tmp165 tmp167 = tmp161 / tmp166 tmp168 = tmp63 - tmp167 tmp169 = tl.broadcast_to(tmp168, [XBLOCK, RBLOCK]) tmp171 = tl.sum(tmp169, 1)[:, None] tl.store(out_ptr3 + tl.full([XBLOCK, 1], 0, tl.int32), tmp171, None) @triton.jit def triton_per_fused_add_argmax_div_mean_mul_nll_loss2d_forward_rsub_1( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp17 = tl.load(in_ptr0 + 
(32 + r0 + 64 * r1), None) tmp32 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp69 = tl.load(in_out_ptr0 + 0) tmp70 = tl.broadcast_to(tmp69, [XBLOCK, 1]) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1, 1], 0, tl.int64) tmp11 = tl.full([1, 1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1, 1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1, 1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tmp47 = tl.full([1, 1], -100, tl.int64) tmp48 = tmp46 != tmp47 tmp49 = tl.where(tmp48, tmp46, tmp10) tmp50 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp51 = tmp49 + tmp50 tmp52 = tmp49 < 0 tmp53 = tl.where(tmp52, tmp51, tmp49) tl.device_assert((0 <= tmp53) & (tmp53 < 4), 'index out of bounds: 0 <= tmp53 < 4') tmp55 = tl.load(in_ptr1 + (r0 + 16 * tmp53 + 64 * r1), None) tmp56 = 1e-06 tmp57 = tmp55 + tmp56 tmp58 = tl_math.log(tmp57) tmp59 = -tmp58 tmp60 = 0.0 tmp61 = tl.where(tmp48, tmp59, tmp60) tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK]) tmp64 = tl.sum(tmp62, 1)[:, None] tmp65 = tmp48.to(tl.int64) tmp66 = tl.broadcast_to(tmp65, [XBLOCK, RBLOCK]) tmp68 = tl.sum(tmp66, 1)[:, None] tmp71 = 16.0 tmp72 = tmp70 / tmp71 tmp73 = 0.5 tmp74 = tmp72 * tmp73 tmp75 = tmp68.to(tl.float32) tmp76 = tmp64 / tmp75 tmp77 = tmp76 * tmp73 tmp78 = tmp74 + tmp77 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp78, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_add_div_mean_mul_rsub_sum_0[grid(1)](arg1_1, arg0_1, buf3, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) buf7 = buf3 del buf3 triton_per_fused_add_argmax_div_mean_mul_nll_loss2d_forward_rsub_1[grid (1)](buf7, arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf7, class FScoreLoss(nn.modules.loss._WeightedLoss): """Uses the 1 - F-score as a loss. .. math:: F = \frac{ (1 + \beta^2) TP }{ (1 + \beta^2) TP + \beta^2 FN + FP } Args: beta: The beta in the above formula. eps: Epsilon for numerical stability. tp_bias: This is added to the TP count. Should add a little stability for very small structures. make_onehot: Convert the target segmentation to onehot internally. Turn this off if the target is already onehot. make_onehot_newaxis: 'newaxis' argument for the onehot conversion. ignore_index: These classes will not contribute to the loss. Has no effect if reduction is "none". weight: Weights for the different classes. Has no effect if reduction is "none". reduction: "mean", "sum", or "none".
""" def __init__(self, beta: 'float'=1.0, eps: 'float'=1e-06, tp_bias: 'Union[int, float]'=0, make_onehot: 'bool'=False, make_onehot_newaxis: 'bool'=False, ignore_index: 'Optional[Union[int, Iterable[int]]]'=None, weight: 'Optional[torch.Tensor]'=None, reduction='mean', **kwargs): super(FScoreLoss, self).__init__(weight=weight, reduction=reduction) self.beta = beta self.eps = eps self.tp_bias = tp_bias self.make_onehot = make_onehot self.make_onehot_newaxis = make_onehot_newaxis self.ignore_index = ignore_index if isinstance(ignore_index, int): self.ignore_index = [ignore_index] def forward(self, input_: 'torch.Tensor', target: 'torch.Tensor' ) ->torch.Tensor: """Forward pass through the loss. Args: input_: Input with expected shape (B, C, ...) with C the number of classes. target: Target, either onehot with shape (B, C, ...), or not with shape either (B, ...) or (B, 1, ...). Make sure to set the make_onehot and make_onehot_newaxis arguments appropriately. Returns: The loss. """ assert not target.requires_grad if self.make_onehot: target = make_onehot_segmentation(target, range(input_.shape[1] ), newaxis=self.make_onehot_newaxis) target = target.float() tp = input_ * target fn = (1 - input_) * target fp = input_ * (1 - target) while tp.dim() > 2: tp = tp.sum(-1, keepdim=False) fn = fn.sum(-1, keepdim=False) fp = fp.sum(-1, keepdim=False) tp += self.tp_bias result = 1 - (1 + self.beta * self.beta) * tp / ((1 + self.beta * self.beta) * tp + self.beta * self.beta * fn + fp + self.eps) if self.reduction != 'none': if self.weight is not None: self.weight = self.weight.float() self.weight = self.weight weight = self.weight.expand_as(result) result = result * weight if self.ignore_index is not None: for cls in sorted(self.ignore_index, reverse=True): if cls == result.shape[1] - 1: result = result[:, :-1] elif cls == 0: result = result[:, 1:] else: result = torch.cat([result[:, :cls], result[:, cls + 1:]], 1) if self.reduction == 'mean': result = torch.mean(result) elif self.reduction == 'sum': result = torch.sum(result) else: raise ValueError( "reduction must be 'none', 'mean' or 'sum', but is {}". format(self.reduction)) return result class DiceLoss(FScoreLoss): """FScoreLoss with beta=1.""" def __init__(self, **kwargs): kwargs['beta'] = 1.0 super(DiceLoss, self).__init__(**kwargs) def forward(self, input_, target): return super(DiceLoss, self).forward(input_, target) class CrossEntropyDiceLossNew(DiceLoss): """Weighted sum of CE and Dice losses. Expects softmax inputs! Args: ce_weight: The weight for the CE loss. Weight for the Dice loss will be 1 - ce_weight. """ def __init__(self, ce_weight: 'float'=0.5, *args, **kwargs): super().__init__(*args, **kwargs) self.ce_weight = ce_weight self.ce = nn.NLLLoss(weight=kwargs.get('weight', None)) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
MIC-DKFZ/image-time-series
CrossEntropyDiceLoss
false
5574
[ "MIT" ]
1
0480d5cb6936c7d9e839b6741f18c10893d78d8a
https://github.com/MIC-DKFZ/image-time-series/tree/0480d5cb6936c7d9e839b6741f18c10893d78d8a
import torch from typing import Union from typing import Optional from typing import Iterable from torch import nn class FScoreLoss(nn.modules.loss._WeightedLoss): """Uses the 1 - F-score as a loss. .. math:: F = \frac{ (1 + \beta^2) TP }{ (1 + \beta^2) TP + \beta^2 FN + FP } Args: beta: The beta in the above formula. eps: Epsilon for numerical stability. tp_bias: This is added to the TP count. Should add a little stability for very small structures. make_onehot: Convert the target segmentation to onehot internally. Turn this off if the target is already onehot. make_onehot_newaxis: 'newaxis' argument for the onehot conversion. ignore_index: These classes will not contribute to the loss. Has no effect if reduction is "none". weight: Weights for the different classes. Has no effect if reduction is "none". reduction: "mean", "sum", or "none". """ def __init__(self, beta: 'float'=1.0, eps: 'float'=1e-06, tp_bias: 'Union[int, float]'=0, make_onehot: 'bool'=False, make_onehot_newaxis: 'bool'=False, ignore_index: 'Optional[Union[int, Iterable[int]]]'=None, weight: 'Optional[torch.Tensor]'=None, reduction='mean', **kwargs): super().__init__(weight=weight, reduction=reduction) self.beta = beta self.eps = eps self.tp_bias = tp_bias self.make_onehot = make_onehot self.make_onehot_newaxis = make_onehot_newaxis self.ignore_index = ignore_index if isinstance(ignore_index, int): self.ignore_index = [ignore_index] def forward(self, input_: 'torch.Tensor', target: 'torch.Tensor' ) ->torch.Tensor: """Forward pass through the loss. Args: input_: Input with expected shape (B, C, ...) with C the number of classes. target: Target, either onehot with shape (B, C, ...), or not with shape either (B, ...) or (B, 1, ...). Make sure to set the make_onehot and make_onehot_newaxis arguments appropriately. Returns: The loss. """ assert not target.requires_grad if self.make_onehot: target = make_onehot_segmentation(target, range(input_.shape[1] ), newaxis=self.make_onehot_newaxis) target = target.float() tp = input_ * target fn = (1 - input_) * target fp = input_ * (1 - target) while tp.dim() > 2: tp = tp.sum(-1, keepdim=False) fn = fn.sum(-1, keepdim=False) fp = fp.sum(-1, keepdim=False) tp += self.tp_bias result = 1 - (1 + self.beta * self.beta) * tp / ((1 + self.beta * self.beta) * tp + self.beta * self.beta * fn + fp + self.eps) if self.reduction != 'none': if self.weight is not None: self.weight = self.weight.float() self.weight = self.weight weight = self.weight.expand_as(result) result = result * weight if self.ignore_index is not None: for cls in sorted(self.ignore_index, reverse=True): if cls == result.shape[1] - 1: result = result[:, :-1] elif cls == 0: result = result[:, 1:] else: result = torch.cat([result[:, :cls], result[:, cls + 1:]], 1) if self.reduction == 'mean': result = torch.mean(result) elif self.reduction == 'sum': result = torch.sum(result) else: raise ValueError( "reduction must be 'none', 'mean' or 'sum', but is {}". format(self.reduction)) return result class DiceLoss(FScoreLoss): """FScoreLoss with beta=1.""" def __init__(self, ** # ... truncated (>4000 chars) for memory efficiency
AvgPoolPad
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/pr/cprzlfpjjqlj6tudvbc455jxno35xlnta4wgmkbc6uo5zmcxii4s.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.avg_pool2d] # Source node to ATen node mapping: # x => constant_pad_nd # x_1 => avg_pool2d # Graph fragment: # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%arg0_1, [1, 0, 1, 0], 0.0), kwargs = {}) # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%constant_pad_nd, [3, 3], [2, 2], [1, 1], False, False), kwargs = {}) triton_poi_fused_avg_pool2d_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_avg_pool2d_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 3) % 3 x0 = xindex % 3 x2 = (xindex // 9) x4 = xindex tmp0 = (-1) + (2*x1) tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + (2*x0) tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = (-2) + (2*x1) tmp12 = tmp11 >= tmp1 tmp13 = (-2) + (2*x0) tmp14 = tmp13 >= tmp1 tmp15 = tmp12 & tmp14 tmp16 = tmp15 & tmp10 tmp17 = tl.load(in_ptr0 + ((-10) + (2*x0) + (8*x1) + (16*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp10, tmp17, tmp18) tmp20 = 2*x0 tmp21 = tmp20 >= tmp1 tmp22 = tmp20 < tmp3 tmp23 = tmp21 & tmp22 tmp24 = tmp5 & tmp23 tmp25 = tmp12 & tmp7 tmp26 = tmp25 & tmp24 tmp27 = tl.load(in_ptr0 + ((-9) + (2*x0) + (8*x1) + (16*x2)), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp24, tmp27, tmp28) tmp30 = tmp29 + tmp19 tmp31 = 1 + (2*x0) tmp32 = tmp31 >= tmp1 tmp33 = tmp31 < tmp3 tmp34 = tmp32 & tmp33 tmp35 = tmp5 & tmp34 tmp36 = tmp12 & tmp21 tmp37 = tmp36 & tmp35 tmp38 = tl.load(in_ptr0 + ((-8) + (2*x0) + (8*x1) + (16*x2)), tmp37 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype) tmp40 = tl.where(tmp35, tmp38, tmp39) tmp41 = tmp40 + tmp30 tmp42 = 2*x1 tmp43 = tmp42 >= tmp1 tmp44 = tmp42 < tmp3 tmp45 = tmp43 & tmp44 tmp46 = tmp45 & tmp9 tmp47 = tmp2 & tmp14 tmp48 = tmp47 & tmp46 tmp49 = tl.load(in_ptr0 + ((-6) + (2*x0) + (8*x1) + (16*x2)), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype) tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = tmp51 + tmp41 tmp53 = tmp45 & tmp23 tmp54 = tmp2 & tmp7 tmp55 = tmp54 & tmp53 tmp56 = tl.load(in_ptr0 + ((-5) + (2*x0) + (8*x1) + (16*x2)), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype) tmp58 = tl.where(tmp53, tmp56, tmp57) tmp59 = tmp58 + tmp52 tmp60 = tmp45 & tmp34 tmp61 = tmp2 & tmp21 tmp62 = tmp61 & tmp60 tmp63 = tl.load(in_ptr0 + ((-4) + (2*x0) + (8*x1) + (16*x2)), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype) tmp65 = tl.where(tmp60, tmp63, tmp64) tmp66 = tmp65 + tmp59 tmp67 = 1 + (2*x1) tmp68 = tmp67 >= tmp1 tmp69 = tmp67 < tmp3 tmp70 = tmp68 & tmp69 tmp71 = tmp70 & tmp9 tmp72 = tmp43 & tmp14 tmp73 = tmp72 & tmp71 tmp74 = tl.load(in_ptr0 + ((-2) + (2*x0) + (8*x1) + (16*x2)), tmp73 & xmask, eviction_policy='evict_last', other=0.0) tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype) tmp76 = tl.where(tmp71, tmp74, tmp75) tmp77 = tmp76 + tmp66 tmp78 = tmp70 & tmp23 tmp79 = tmp43 & tmp7 tmp80 = tmp79 & tmp78 tmp81 = tl.load(in_ptr0 + ((-1) + (2*x0) + (8*x1) + (16*x2)), tmp80 & xmask, eviction_policy='evict_last', other=0.0) tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype) tmp83 = tl.where(tmp78, tmp81, tmp82) tmp84 = tmp83 + tmp77 tmp85 = tmp70 & tmp34 tmp86 = tmp43 & tmp21 tmp87 = tmp86 & tmp85 tmp88 = tl.load(in_ptr0 + ((2*x0) + (8*x1) + (16*x2)), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full(tmp88.shape, 0.0, tmp88.dtype) tmp90 = tl.where(tmp85, tmp88, tmp89) tmp91 = tmp90 + tmp84 tmp92 = (((0) * ((0) >= ((-1) + (2*x0))) + ((-1) + (2*x0)) * (((-1) + (2*x0)) > (0)))*((0) * ((0) >= ((-1) + (2*x1))) + ((-1) + (2*x1)) * (((-1) + (2*x1)) > (0)))) + (((5) * 
((5) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (5)))*((5) * ((5) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (5)))) + ((-1)*((0) * ((0) >= ((-1) + (2*x0))) + ((-1) + (2*x0)) * (((-1) + (2*x0)) > (0)))*((5) * ((5) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (5)))) + ((-1)*((0) * ((0) >= ((-1) + (2*x1))) + ((-1) + (2*x1)) * (((-1) + (2*x1)) > (0)))*((5) * ((5) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (5)))) tmp93 = tmp91 / tmp92 tl.store(out_ptr0 + (x4), tmp93, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.avg_pool2d] stream0 = get_raw_stream(0) triton_poi_fused_avg_pool2d_constant_pad_nd_0.run(arg0_1, buf0, 144, grid=grid(144), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 4, 2, 2), (36, 9, 3, 1), 4), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class AvgPoolPad(nn.Module): def __init__(self, stride=2, padding=1): super(AvgPoolPad, self).__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.AvgPool2d(3, stride=stride, padding=padding, count_include_pad=False) def forward(self, x): x = self.pad(x) x = self.pool(x) x = x[:, :, 1:, 1:] return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
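A hedged shape walkthrough of the eager module (illustrative tensors, not from the repository): the 4x4 input grows to 5x5 under ZeroPad2d((1, 0, 1, 0)), the padded AvgPool2d produces 3x3, and the [1:, 1:] crop leaves 2x2, which is why the compiled call() above returns a strided (4, 4, 2, 2) view, offset by 4 elements, into its 3x3 buffer:

import torch

m = AvgPoolPad()
x = torch.rand(4, 4, 4, 4)
y = m(x)  # pad 4x4 -> 5x5, pool -> 3x3, crop [1:, 1:] -> 2x2
print(y.shape)  # torch.Size([4, 4, 2, 2])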
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 3 % 3 x0 = xindex % 3 x2 = xindex // 9 x4 = xindex tmp0 = -1 + 2 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + 2 * x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = -2 + 2 * x1 tmp12 = tmp11 >= tmp1 tmp13 = -2 + 2 * x0 tmp14 = tmp13 >= tmp1 tmp15 = tmp12 & tmp14 tmp16 = tmp15 & tmp10 tmp17 = tl.load(in_ptr0 + (-10 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp10, tmp17, tmp18) tmp20 = 2 * x0 tmp21 = tmp20 >= tmp1 tmp22 = tmp20 < tmp3 tmp23 = tmp21 & tmp22 tmp24 = tmp5 & tmp23 tmp25 = tmp12 & tmp7 tmp26 = tmp25 & tmp24 tmp27 = tl.load(in_ptr0 + (-9 + 2 * x0 + 8 * x1 + 16 * x2), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp24, tmp27, tmp28) tmp30 = tmp29 + tmp19 tmp31 = 1 + 2 * x0 tmp32 = tmp31 >= tmp1 tmp33 = tmp31 < tmp3 tmp34 = tmp32 & tmp33 tmp35 = tmp5 & tmp34 tmp36 = tmp12 & tmp21 tmp37 = tmp36 & tmp35 tmp38 = tl.load(in_ptr0 + (-8 + 2 * x0 + 8 * x1 + 16 * x2), tmp37 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype) tmp40 = tl.where(tmp35, tmp38, tmp39) tmp41 = tmp40 + tmp30 tmp42 = 2 * x1 tmp43 = tmp42 >= tmp1 tmp44 = tmp42 < tmp3 tmp45 = tmp43 & tmp44 tmp46 = tmp45 & tmp9 tmp47 = tmp2 & tmp14 tmp48 = tmp47 & tmp46 tmp49 = tl.load(in_ptr0 + (-6 + 2 * x0 + 8 * x1 + 16 * x2), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype) tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = tmp51 + tmp41 tmp53 = tmp45 & tmp23 tmp54 = tmp2 & tmp7 tmp55 = tmp54 & tmp53 tmp56 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype) tmp58 = tl.where(tmp53, tmp56, tmp57) tmp59 = tmp58 + tmp52 tmp60 = tmp45 & tmp34 tmp61 = tmp2 & tmp21 tmp62 = tmp61 & tmp60 tmp63 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype) tmp65 = tl.where(tmp60, tmp63, tmp64) tmp66 = tmp65 + tmp59 tmp67 = 1 + 2 * x1 tmp68 = tmp67 >= tmp1 tmp69 = tmp67 < tmp3 tmp70 = tmp68 & tmp69 tmp71 = tmp70 & tmp9 tmp72 = tmp43 & tmp14 tmp73 = tmp72 & tmp71 tmp74 = tl.load(in_ptr0 + (-2 + 2 * x0 + 8 * x1 + 16 * x2), tmp73 & xmask, eviction_policy='evict_last', other=0.0) tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype) tmp76 = tl.where(tmp71, tmp74, tmp75) tmp77 = tmp76 + tmp66 tmp78 = tmp70 & tmp23 tmp79 = tmp43 & tmp7 tmp80 = tmp79 & tmp78 tmp81 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp80 & xmask, eviction_policy='evict_last', other=0.0) tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype) tmp83 = 
tl.where(tmp78, tmp81, tmp82) tmp84 = tmp83 + tmp77 tmp85 = tmp70 & tmp34 tmp86 = tmp43 & tmp21 tmp87 = tmp86 & tmp85 tmp88 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full(tmp88.shape, 0.0, tmp88.dtype) tmp90 = tl.where(tmp85, tmp88, tmp89) tmp91 = tmp90 + tmp84 tmp92 = (0 * (0 >= -1 + 2 * x0) + (-1 + 2 * x0) * (-1 + 2 * x0 > 0)) * ( 0 * (0 >= -1 + 2 * x1) + (-1 + 2 * x1) * (-1 + 2 * x1 > 0)) + (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 * x0 < 5)) * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)) + -1 * (0 * (0 >= -1 + 2 * x0) + (-1 + 2 * x0) * (-1 + 2 * x0 > 0)) * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)) + -1 * (0 * (0 >= -1 + 2 * x1) + ( -1 + 2 * x1) * (-1 + 2 * x1 > 0)) * (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 * x0 < 5)) tmp93 = tmp91 / tmp92 tl.store(out_ptr0 + x4, tmp93, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_constant_pad_nd_0[grid(144)](arg0_1, buf0, 144, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4, 2, 2), (36, 9, 3, 1), 4), class AvgPoolPadNew(nn.Module): def __init__(self, stride=2, padding=1): super(AvgPoolPadNew, self).__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.AvgPool2d(3, stride=stride, padding=padding, count_include_pad=False) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
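A sanity-check sketch comparing the fused path with the eager module (an assumed harness; the two classes live in the separate listings above):

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = AvgPoolPad()(x)      # pad -> pool -> crop in three eager ops
    fused = AvgPoolPadNew()(x)   # one fused Triton kernel plus a strided view
    print(torch.allclose(eager, fused))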
MarioProjects/pytorchlib
AvgPoolPad
false
5575
[ "MIT" ]
1
81ea32304d899fbd10ae1efe1d124c0d7bc96f5c
https://github.com/MarioProjects/pytorchlib/tree/81ea32304d899fbd10ae1efe1d124c0d7bc96f5c
import torch from torch import nn class Model(nn.Module): def __init__(self, stride=2, padding=1): super().__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.AvgPool2d(3, stride=stride, padding=padding, count_include_pad=False) def forward(self, x): x = self.pad(x) x = self.pool(x) x = x[:, :, 1:, 1:] return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
MaxPoolPad
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/jf/cjf7zenaxtvwhbfrvvghsyyrrhxyrlvtj5rotfw7n2nqtvscv3l7.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => constant_pad_nd # x_1 => _low_memory_max_pool2d_with_offsets # Graph fragment: # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%arg0_1, [1, 0, 1, 0], 0.0), kwargs = {}) # %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%constant_pad_nd, [3, 3], [2, 2], [1, 1], [1, 1], False), kwargs = {}) triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 3) % 3 x0 = xindex % 3 x2 = (xindex // 9) x4 = xindex tmp0 = (-1) + (2*x1) tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + (2*x0) tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = (-2) + (2*x1) tmp12 = tmp11 >= tmp1 tmp13 = (-2) + (2*x0) tmp14 = tmp13 >= tmp1 tmp15 = tmp12 & tmp14 tmp16 = tmp15 & tmp10 tmp17 = tl.load(in_ptr0 + ((-10) + (2*x0) + (8*x1) + (16*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.full(tmp17.shape, float("-inf"), tmp17.dtype) tmp19 = tl.where(tmp10, tmp17, tmp18) tmp20 = 2*x0 tmp21 = tmp20 >= tmp1 tmp22 = tmp20 < tmp3 tmp23 = tmp21 & tmp22 tmp24 = tmp5 & tmp23 tmp25 = tmp12 & tmp7 tmp26 = tmp25 & tmp24 tmp27 = tl.load(in_ptr0 + ((-9) + (2*x0) + (8*x1) + (16*x2)), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.full(tmp27.shape, float("-inf"), tmp27.dtype) tmp29 = tl.where(tmp24, tmp27, tmp28) tmp30 = triton_helpers.maximum(tmp29, tmp19) tmp31 = 1 + (2*x0) tmp32 = tmp31 >= tmp1 tmp33 = tmp31 < tmp3 tmp34 = tmp32 & tmp33 tmp35 = tmp5 & tmp34 tmp36 = tmp12 & tmp21 tmp37 = tmp36 & tmp35 tmp38 = tl.load(in_ptr0 + ((-8) + (2*x0) + (8*x1) + (16*x2)), tmp37 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tl.full(tmp38.shape, float("-inf"), tmp38.dtype) tmp40 = tl.where(tmp35, tmp38, tmp39) tmp41 = triton_helpers.maximum(tmp40, tmp30) tmp42 = 2*x1 tmp43 = tmp42 >= tmp1 tmp44 = tmp42 < tmp3 tmp45 = tmp43 & tmp44 tmp46 = tmp45 & tmp9 tmp47 = tmp2 & tmp14 tmp48 = tmp47 & tmp46 tmp49 = tl.load(in_ptr0 + ((-6) + (2*x0) + (8*x1) + (16*x2)), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.full(tmp49.shape, float("-inf"), tmp49.dtype) tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = triton_helpers.maximum(tmp51, tmp41) tmp53 = tmp45 & tmp23 tmp54 = tmp2 & tmp7 tmp55 = tmp54 & tmp53 tmp56 = tl.load(in_ptr0 + ((-5) + (2*x0) + (8*x1) + (16*x2)), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.full(tmp56.shape, float("-inf"), tmp56.dtype) tmp58 = tl.where(tmp53, tmp56, tmp57) tmp59 = triton_helpers.maximum(tmp58, tmp52) tmp60 = tmp45 & tmp34 tmp61 = tmp2 & tmp21 tmp62 = tmp61 & tmp60 tmp63 = tl.load(in_ptr0 + ((-4) + (2*x0) + (8*x1) + (16*x2)), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tl.full(tmp63.shape, float("-inf"), tmp63.dtype) tmp65 = tl.where(tmp60, tmp63, tmp64) tmp66 = triton_helpers.maximum(tmp65, tmp59) tmp67 = 1 + (2*x1) tmp68 = tmp67 >= tmp1 tmp69 = tmp67 < tmp3 tmp70 = tmp68 & tmp69 tmp71 = tmp70 & tmp9 tmp72 = tmp43 & tmp14 tmp73 = tmp72 & tmp71 tmp74 = tl.load(in_ptr0 + ((-2) + (2*x0) + (8*x1) + (16*x2)), tmp73 & xmask, eviction_policy='evict_last', other=0.0) tmp75 = tl.full(tmp74.shape, float("-inf"), tmp74.dtype) tmp76 = tl.where(tmp71, tmp74, tmp75) tmp77 = triton_helpers.maximum(tmp76, tmp66) tmp78 = tmp70 & tmp23 tmp79 = tmp43 & tmp7 tmp80 = tmp79 & tmp78 tmp81 = tl.load(in_ptr0 + ((-1) + (2*x0) + (8*x1) + (16*x2)), tmp80 & xmask, eviction_policy='evict_last', other=0.0) tmp82 = tl.full(tmp81.shape, float("-inf"), tmp81.dtype) tmp83 = tl.where(tmp78, tmp81, tmp82) tmp84 = triton_helpers.maximum(tmp83, tmp77) tmp85 = tmp70 & tmp34 tmp86 = tmp43 & tmp21 tmp87 = tmp86 & tmp85 
tmp88 = tl.load(in_ptr0 + ((2*x0) + (8*x1) + (16*x2)), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full(tmp88.shape, float("-inf"), tmp88.dtype) tmp90 = tl.where(tmp85, tmp88, tmp89) tmp91 = triton_helpers.maximum(tmp90, tmp84) tl.store(out_ptr0 + (x4), tmp91, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.constant_pad_nd, aten.max_pool2d_with_indices] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0.run(arg0_1, buf0, 144, grid=grid(144), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 4, 2, 2), (36, 9, 3, 1), 4), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class MaxPoolPad(nn.Module): def __init__(self): super(MaxPoolPad, self).__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.MaxPool2d(3, stride=2, padding=1) def forward(self, x): x = self.pad(x) x = self.pool(x) x = x[:, :, 1:, 1:] return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 3 % 3 x0 = xindex % 3 x2 = xindex // 9 x4 = xindex tmp0 = -1 + 2 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + 2 * x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = -2 + 2 * x1 tmp12 = tmp11 >= tmp1 tmp13 = -2 + 2 * x0 tmp14 = tmp13 >= tmp1 tmp15 = tmp12 & tmp14 tmp16 = tmp15 & tmp10 tmp17 = tl.load(in_ptr0 + (-10 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.full(tmp17.shape, float('-inf'), tmp17.dtype) tmp19 = tl.where(tmp10, tmp17, tmp18) tmp20 = 2 * x0 tmp21 = tmp20 >= tmp1 tmp22 = tmp20 < tmp3 tmp23 = tmp21 & tmp22 tmp24 = tmp5 & tmp23 tmp25 = tmp12 & tmp7 tmp26 = tmp25 & tmp24 tmp27 = tl.load(in_ptr0 + (-9 + 2 * x0 + 8 * x1 + 16 * x2), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tl.full(tmp27.shape, float('-inf'), tmp27.dtype) tmp29 = tl.where(tmp24, tmp27, tmp28) tmp30 = triton_helpers.maximum(tmp29, tmp19) tmp31 = 1 + 2 * x0 tmp32 = tmp31 >= tmp1 tmp33 = tmp31 < tmp3 tmp34 = tmp32 & tmp33 tmp35 = tmp5 & tmp34 tmp36 = tmp12 & tmp21 tmp37 = tmp36 & tmp35 tmp38 = tl.load(in_ptr0 + (-8 + 2 * x0 + 8 * x1 + 16 * x2), tmp37 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = tl.full(tmp38.shape, float('-inf'), tmp38.dtype) tmp40 = tl.where(tmp35, tmp38, tmp39) tmp41 = triton_helpers.maximum(tmp40, tmp30) tmp42 = 2 * x1 tmp43 = tmp42 >= tmp1 tmp44 = tmp42 < tmp3 tmp45 = tmp43 & tmp44 tmp46 = tmp45 & tmp9 tmp47 = tmp2 & tmp14 tmp48 = tmp47 & tmp46 tmp49 = tl.load(in_ptr0 + (-6 + 2 * x0 + 8 * x1 + 16 * x2), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.full(tmp49.shape, float('-inf'), tmp49.dtype) tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = triton_helpers.maximum(tmp51, tmp41) tmp53 = tmp45 & tmp23 tmp54 = tmp2 & tmp7 tmp55 = tmp54 & tmp53 tmp56 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp55 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.full(tmp56.shape, float('-inf'), tmp56.dtype) tmp58 = tl.where(tmp53, tmp56, tmp57) tmp59 = triton_helpers.maximum(tmp58, tmp52) tmp60 = tmp45 & tmp34 tmp61 = tmp2 & tmp21 tmp62 = tmp61 & tmp60 tmp63 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp62 & xmask, eviction_policy='evict_last', other=0.0) tmp64 = tl.full(tmp63.shape, float('-inf'), tmp63.dtype) tmp65 = tl.where(tmp60, tmp63, tmp64) tmp66 = triton_helpers.maximum(tmp65, tmp59) tmp67 = 1 + 2 * x1 tmp68 = tmp67 >= tmp1 tmp69 = tmp67 < tmp3 tmp70 = tmp68 & tmp69 tmp71 = tmp70 & tmp9 tmp72 = tmp43 & tmp14 tmp73 = tmp72 & tmp71 tmp74 = tl.load(in_ptr0 + (-2 + 2 * x0 + 8 * x1 + 16 * x2), tmp73 & xmask, eviction_policy='evict_last', other=0.0) tmp75 = tl.full(tmp74.shape, float('-inf'), tmp74.dtype) tmp76 = tl.where(tmp71, tmp74, tmp75) tmp77 = 
triton_helpers.maximum(tmp76, tmp66) tmp78 = tmp70 & tmp23 tmp79 = tmp43 & tmp7 tmp80 = tmp79 & tmp78 tmp81 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp80 & xmask, eviction_policy='evict_last', other=0.0) tmp82 = tl.full(tmp81.shape, float('-inf'), tmp81.dtype) tmp83 = tl.where(tmp78, tmp81, tmp82) tmp84 = triton_helpers.maximum(tmp83, tmp77) tmp85 = tmp70 & tmp34 tmp86 = tmp43 & tmp21 tmp87 = tmp86 & tmp85 tmp88 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full(tmp88.shape, float('-inf'), tmp88.dtype) tmp90 = tl.where(tmp85, tmp88, tmp89) tmp91 = triton_helpers.maximum(tmp90, tmp84) tl.store(out_ptr0 + x4, tmp91, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0[grid(144)]( arg0_1, buf0, 144, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4, 2, 2), (36, 9, 3, 1), 4), class MaxPoolPadNew(nn.Module): def __init__(self): super(MaxPoolPadNew, self).__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.MaxPool2d(3, stride=2, padding=1) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
MarioProjects/pytorchlib
MaxPoolPad
false
5,576
[ "MIT" ]
1
81ea32304d899fbd10ae1efe1d124c0d7bc96f5c
https://github.com/MarioProjects/pytorchlib/tree/81ea32304d899fbd10ae1efe1d124c0d7bc96f5c
import torch from torch import nn class Model(nn.Module): def __init__(self): super().__init__() self.pad = nn.ZeroPad2d((1, 0, 1, 0)) self.pool = nn.MaxPool2d(3, stride=2, padding=1) def forward(self, x): x = self.pad(x) x = self.pool(x) x = x[:, :, 1:, 1:] return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
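A quick eager-mode shape check clarifies what the fused pad+pool kernel above computes: ZeroPad2d((1, 0, 1, 0)) grows each 4x4 map to 5x5, MaxPool2d(3, stride=2, padding=1) reduces it to 3x3, and the [:, :, 1:, 1:] slice leaves 2x2, which is why call() allocates a (4, 4, 3, 3) buffer and returns it reinterpreted as (4, 4, 2, 2) at storage offset 4 (one row plus one column in). A minimal sketch using only the modules from the record above (no CUDA or Triton needed):

import torch
from torch import nn

pad = nn.ZeroPad2d((1, 0, 1, 0))             # (left, right, top, bottom)
pool = nn.MaxPool2d(3, stride=2, padding=1)

x = torch.rand(4, 4, 4, 4)
padded = pad(x)                              # (4, 4, 5, 5)
pooled = pool(padded)                        # floor((5 + 2*1 - 3) / 2) + 1 = 3 -> (4, 4, 3, 3)
out = pooled[:, :, 1:, 1:]                   # (4, 4, 2, 2), matching the reinterpret_tensor view
print(padded.shape, pooled.shape, out.shape)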
Conv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/z3/cz3vliqlpgih6ihwoaxl6cmnicfmv2ygutcuphilcsragp3evc57.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => convolution # x_1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_2, buf2, 16, grid=grid(16), stream=stream0) del primals_2 return (buf1, primals_1, primals_3, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from math import sqrt as sqrt from itertools import product as product class Conv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, NL ='relu', same_padding=False, bn=False): super(Conv2d, self).__init__() padding = int((kernel_size - 1) / 2) if same_padding else 0 self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding) self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0, affine=True) if bn else None if NL == 'relu': self.relu = nn.ReLU(inplace=True) elif NL == 'prelu': self.relu = nn.PReLU() else: self.relu = None def forward(self, x): x = self.conv(x) if self.bn is not None: x = self.bn(x) if self.relu is not None: x = self.relu(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from math import sqrt as sqrt from itertools import product as product assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool) get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0[grid(16)](buf1, primals_2, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return buf1, primals_1, primals_3, buf2 class Conv2dNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, NL ='relu', same_padding=False, bn=False): super(Conv2dNew, self).__init__() padding = int((kernel_size - 1) / 2) if same_padding else 0 self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding) self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0, affine=True) if bn else None if NL == 'relu': self.relu = nn.ReLU(inplace=True) elif NL == 'prelu': self.relu = nn.PReLU() else: self.relu = None def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
MONICA-Project/sfn
Conv2d
false
5,577
[ "Apache-2.0" ]
1
40509e520e83441068b5a2d151864fe3a5814d5e
https://github.com/MONICA-Project/sfn/tree/40509e520e83441068b5a2d151864fe3a5814d5e
import torch import torch.nn as nn from math import sqrt as sqrt from itertools import product as product class Model(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, NL ='relu', same_padding=False, bn=False): super().__init__() padding = int((kernel_size - 1) / 2) if same_padding else 0 self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding) self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0, affine=True) if bn else None if NL == 'relu': self.relu = nn.ReLU(inplace=True) elif NL == 'prelu': self.relu = nn.PReLU() else: self.relu = None def forward(self, x): x = self.conv(x) if self.bn is not None: x = self.bn(x) if self.relu is not None: x = self.relu(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
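The asserted output shape in call() follows directly from the init arguments in this record: with kernel_size=4, stride=1, and same_padding=False the padding is 0, so a 4x4 input collapses to (4 - 4)/1 + 1 = 1 spatial position, giving buf0 shape (4, 4, 1, 1). The fused Triton kernel then adds the bias, applies ReLU in place, and also stores the boolean mask (output <= 0) that threshold_backward consumes in the backward pass. A minimal eager sketch of the same arithmetic:

import torch
from torch import nn

conv = nn.Conv2d(4, 4, kernel_size=4, stride=1, padding=0)
relu = nn.ReLU(inplace=True)

x = torch.rand(4, 4, 4, 4)
y = relu(conv(x))    # (4 - 4) // 1 + 1 = 1 -> (4, 4, 1, 1)
mask = y <= 0        # the fused kernel emits this mask for threshold_backward
print(y.shape, mask.dtype)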
Policy
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/fj/cfjqcl5zyffb5ly6gkhpzdblek24xftrdhp34wnn56hi434h2szb.py # Topologically Sorted Source Nodes: [alphas], Original ATen: [aten._softmax] # Source node to ATen node mapping: # alphas => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%primals_1, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': 
False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 2) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp1 - tmp3 tmp7 = tl_math.exp(tmp6) tmp8 = tmp2 - tmp3 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tmp5 / tmp10 tl.store(out_ptr0 + (x2), tmp11, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, = args args.clear() assert_size_stride(primals_1, (6, 2), (2, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((6, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [alphas], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(primals_1, buf0, 12, grid=grid(12), stream=stream0) del primals_1 return (buf0, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((6, 2), (2, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from copy import deepcopy import torch.nn as nn class Policy(nn.Module): def __init__(self, max_nodes, search_space): super(Policy, self).__init__() self.max_nodes = max_nodes self.search_space = deepcopy(search_space) self.edge2index = {} for i in range(1, max_nodes): for j in range(i): node_str = '{:}<-{:}'.format(i, j) self.edge2index[node_str] = len(self.edge2index) self.arch_parameters = nn.Parameter(0.001 * torch.randn(len(self. edge2index), len(search_space))) def generate_arch(self, actions): genotypes = [] for i in range(1, self.max_nodes): xlist = [] for j in range(i): node_str = '{:}<-{:}'.format(i, j) op_name = self.search_space[actions[self.edge2index[node_str]]] xlist.append((op_name, j)) genotypes.append(tuple(xlist)) return CellStructure(genotypes) def genotype(self): genotypes = [] for i in range(1, self.max_nodes): xlist = [] for j in range(i): node_str = '{:}<-{:}'.format(i, j) with torch.no_grad(): weights = self.arch_parameters[self.edge2index[node_str]] op_name = self.search_space[weights.argmax().item()] xlist.append((op_name, j)) genotypes.append(tuple(xlist)) return CellStructure(genotypes) def forward(self): alphas = nn.functional.softmax(self.arch_parameters, dim=-1) return alphas def get_inputs(): return [] def get_init_inputs(): return [[], {'max_nodes': 4, 'search_space': [4, 4]}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from copy import deepcopy import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 2 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp1 - tmp3 tmp7 = tl_math.exp(tmp6) tmp8 = tmp2 - tmp3 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tmp5 / tmp10 tl.store(out_ptr0 + x2, tmp11, xmask) def call(args): primals_1, = args args.clear() assert_size_stride(primals_1, (6, 2), (2, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((6, 2), (2, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(12)](primals_1, buf0, 12, XBLOCK= 16, num_warps=1, num_stages=1) del primals_1 return buf0, buf0 class PolicyNew(nn.Module): def __init__(self, max_nodes, search_space): super(PolicyNew, self).__init__() self.max_nodes = max_nodes self.search_space = deepcopy(search_space) self.edge2index = {} for i in range(1, max_nodes): for j in range(i): node_str = '{:}<-{:}'.format(i, j) self.edge2index[node_str] = len(self.edge2index) self.arch_parameters = nn.Parameter(0.001 * torch.randn(len(self. edge2index), len(search_space))) def generate_arch(self, actions): genotypes = [] for i in range(1, self.max_nodes): xlist = [] for j in range(i): node_str = '{:}<-{:}'.format(i, j) op_name = self.search_space[actions[self.edge2index[node_str]]] xlist.append((op_name, j)) genotypes.append(tuple(xlist)) return CellStructure(genotypes) def genotype(self): genotypes = [] for i in range(1, self.max_nodes): xlist = [] for j in range(i): node_str = '{:}<-{:}'.format(i, j) with torch.no_grad(): weights = self.arch_parameters[self.edge2index[node_str]] op_name = self.search_space[weights.argmax().item()] xlist.append((op_name, j)) genotypes.append(tuple(xlist)) return CellStructure(genotypes) def forward(self): primals_1 = self.arch_parameters output = call([primals_1]) return output[0]
MUST-AI-Lab/NAS-Projects
Policy
false
5,578
[ "MIT" ]
1
fcb2aae34a2b3c02877fbdb41cda45e1e73327a6
https://github.com/MUST-AI-Lab/NAS-Projects/tree/fcb2aae34a2b3c02877fbdb41cda45e1e73327a6
import torch from copy import deepcopy import torch.nn as nn class Model(nn.Module): def __init__(self, max_nodes, search_space): super().__init__() self.max_nodes = max_nodes self.search_space = deepcopy(search_space) self.edge2index = {} for i in range(1, max_nodes): for j in range(i): node_str = '{:}<-{:}'.format(i, j) self.edge2index[node_str] = len(self.edge2index) self.arch_parameters = nn.Parameter(0.001 * torch.randn(len(self. edge2index), len(search_space))) def generate_arch(self, actions): genotypes = [] for i in range(1, self.max_nodes): xlist = [] for j in range(i): node_str = '{:}<-{:}'.format(i, j) op_name = self.search_space[actions[self.edge2index[node_str]]] xlist.append((op_name, j)) genotypes.append(tuple(xlist)) return CellStructure(genotypes) def genotype(self): genotypes = [] for i in range(1, self.max_nodes): xlist = [] for j in range(i): node_str = '{:}<-{:}'.format(i, j) with torch.no_grad(): weights = self.arch_parameters[self.edge2index[node_str]] op_name = self.search_space[weights.argmax().item()] xlist.append((op_name, j)) genotypes.append(tuple(xlist)) return CellStructure(genotypes) def forward(self): alphas = nn.functional.softmax(self.arch_parameters, dim=-1) return alphas def get_inputs(): return [] def get_init_inputs(): return [4]
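The kernel's xnumel = 12 follows from the init arguments in this record: max_nodes=4 yields 1 + 2 + 3 = 6 edges, and a two-element search_space makes arch_parameters a (6, 2) tensor, so the row-wise softmax touches 12 elements and x1 = xindex // 2 selects the edge whose two logits are normalized together. A minimal eager sketch of the same computation (variable names here are illustrative):

import torch
import torch.nn.functional as F

max_nodes, n_ops = 4, 2
n_edges = sum(range(1, max_nodes))                     # 1 + 2 + 3 = 6
arch_parameters = 0.001 * torch.randn(n_edges, n_ops)  # (6, 2) -> 12 elements

alphas = F.softmax(arch_parameters, dim=-1)            # row-wise, as in the fused kernel
assert torch.allclose(alphas.sum(-1), torch.ones(n_edges))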
FScoreLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/fy/cfyns74s37xab276pc6dbqyls6hbctvmvaztno7qbvcax2iqsmkp.py # Topologically Sorted Source Nodes: [tp, tp_1, tp_2, tp_3, mul_3, mul_4, sub, fn, fn_1, fn_2, mul_5, add, sub_1, fp, fp_1, fp_2, add_1, add_2, truediv, result, result_1], Original ATen: [aten.mul, aten.sum, aten.add, aten.rsub, aten.div, aten.mean] # Source node to ATen node mapping: # add => add_1 # add_1 => add_2 # add_2 => add_3 # fn => mul_1 # fn_1 => sum_2 # fn_2 => sum_5 # fp => mul_2 # fp_1 => sum_3 # fp_2 => sum_6 # mul_3 => mul_3 # mul_4 => mul_4 # mul_5 => mul_5 # result => sub_2 # result_1 => mean # sub => sub # sub_1 => sub_1 # tp => mul # tp_1 => sum_1 # tp_2 => sum_4 # tp_3 => add # truediv => div # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_1, [-1]), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_4, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 2.0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 2.0), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg0_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [-1]), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_2, [-1]), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_5, 1.0), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %mul_5), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = 
(%arg1_1, %sub_1), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [-1]), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_3, [-1]), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %sum_6), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, 1e-06), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_3, %add_3), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {}) triton_per_fused_add_div_mean_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_mean_mul_rsub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 32, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (16*r0), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (16*r0), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (16*r0)), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (16*r0)), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (16*r0)), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (16*r0)), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (16*r0)), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (16*r0)), None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (4 + (16*r0)), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (4 + (16*r0)), None, 
eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (5 + (16*r0)), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (5 + (16*r0)), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr0 + (6 + (16*r0)), None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (6 + (16*r0)), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr0 + (7 + (16*r0)), None, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (7 + (16*r0)), None, eviction_policy='evict_last') tmp31 = tl.load(in_ptr0 + (8 + (16*r0)), None, eviction_policy='evict_last') tmp32 = tl.load(in_ptr1 + (8 + (16*r0)), None, eviction_policy='evict_last') tmp34 = tl.load(in_ptr0 + (9 + (16*r0)), None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr1 + (9 + (16*r0)), None, eviction_policy='evict_last') tmp38 = tl.load(in_ptr0 + (10 + (16*r0)), None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr1 + (10 + (16*r0)), None, eviction_policy='evict_last') tmp42 = tl.load(in_ptr0 + (11 + (16*r0)), None, eviction_policy='evict_last') tmp43 = tl.load(in_ptr1 + (11 + (16*r0)), None, eviction_policy='evict_last') tmp47 = tl.load(in_ptr0 + (12 + (16*r0)), None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr1 + (12 + (16*r0)), None, eviction_policy='evict_last') tmp50 = tl.load(in_ptr0 + (13 + (16*r0)), None, eviction_policy='evict_last') tmp51 = tl.load(in_ptr1 + (13 + (16*r0)), None, eviction_policy='evict_last') tmp54 = tl.load(in_ptr0 + (14 + (16*r0)), None, eviction_policy='evict_last') tmp55 = tl.load(in_ptr1 + (14 + (16*r0)), None, eviction_policy='evict_last') tmp58 = tl.load(in_ptr0 + (15 + (16*r0)), None, eviction_policy='evict_last') tmp59 = tl.load(in_ptr1 + (15 + (16*r0)), None, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp17 = tmp15 * tmp16 tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tmp24 = tmp22 * tmp23 tmp25 = tmp21 + tmp24 tmp28 = tmp26 * tmp27 tmp29 = tmp25 + tmp28 tmp30 = tmp14 + tmp29 tmp33 = tmp31 * tmp32 tmp36 = tmp34 * tmp35 tmp37 = tmp33 + tmp36 tmp40 = tmp38 * tmp39 tmp41 = tmp37 + tmp40 tmp44 = tmp42 * tmp43 tmp45 = tmp41 + tmp44 tmp46 = tmp30 + tmp45 tmp49 = tmp47 * tmp48 tmp52 = tmp50 * tmp51 tmp53 = tmp49 + tmp52 tmp56 = tmp54 * tmp55 tmp57 = tmp53 + tmp56 tmp60 = tmp58 * tmp59 tmp61 = tmp57 + tmp60 tmp62 = tmp46 + tmp61 tmp63 = 1.0 tmp64 = tmp63 - tmp0 tmp65 = tmp64 * tmp1 tmp66 = tmp63 - tmp3 tmp67 = tmp66 * tmp4 tmp68 = tmp65 + tmp67 tmp69 = tmp63 - tmp7 tmp70 = tmp69 * tmp8 tmp71 = tmp68 + tmp70 tmp72 = tmp63 - tmp11 tmp73 = tmp72 * tmp12 tmp74 = tmp71 + tmp73 tmp75 = tmp63 - tmp15 tmp76 = tmp75 * tmp16 tmp77 = tmp63 - tmp18 tmp78 = tmp77 * tmp19 tmp79 = tmp76 + tmp78 tmp80 = tmp63 - tmp22 tmp81 = tmp80 * tmp23 tmp82 = tmp79 + tmp81 tmp83 = tmp63 - tmp26 tmp84 = tmp83 * tmp27 tmp85 = tmp82 + tmp84 tmp86 = tmp74 + tmp85 tmp87 = tmp63 - tmp31 tmp88 = tmp87 * tmp32 tmp89 = tmp63 - tmp34 tmp90 = tmp89 * tmp35 tmp91 = tmp88 + tmp90 tmp92 = tmp63 - tmp38 tmp93 = tmp92 * tmp39 tmp94 = tmp91 + tmp93 tmp95 = tmp63 - tmp42 tmp96 = tmp95 * tmp43 tmp97 = tmp94 + tmp96 tmp98 = tmp86 + tmp97 tmp99 = tmp63 - tmp47 tmp100 = tmp99 * tmp48 tmp101 = tmp63 - tmp50 tmp102 = tmp101 * tmp51 tmp103 = tmp100 + tmp102 tmp104 = tmp63 - tmp54 tmp105 = tmp104 * tmp55 tmp106 = tmp103 + tmp105 tmp107 = tmp63 - tmp58 tmp108 = tmp107 * tmp59 tmp109 = tmp106 + tmp108 tmp110 = tmp98 + tmp109 tmp111 = tmp63 - tmp1 tmp112 = tmp0 * tmp111 tmp113 = tmp63 - tmp4 tmp114 = tmp3 * tmp113 
tmp115 = tmp112 + tmp114 tmp116 = tmp63 - tmp8 tmp117 = tmp7 * tmp116 tmp118 = tmp115 + tmp117 tmp119 = tmp63 - tmp12 tmp120 = tmp11 * tmp119 tmp121 = tmp118 + tmp120 tmp122 = tmp63 - tmp16 tmp123 = tmp15 * tmp122 tmp124 = tmp63 - tmp19 tmp125 = tmp18 * tmp124 tmp126 = tmp123 + tmp125 tmp127 = tmp63 - tmp23 tmp128 = tmp22 * tmp127 tmp129 = tmp126 + tmp128 tmp130 = tmp63 - tmp27 tmp131 = tmp26 * tmp130 tmp132 = tmp129 + tmp131 tmp133 = tmp121 + tmp132 tmp134 = tmp63 - tmp32 tmp135 = tmp31 * tmp134 tmp136 = tmp63 - tmp35 tmp137 = tmp34 * tmp136 tmp138 = tmp135 + tmp137 tmp139 = tmp63 - tmp39 tmp140 = tmp38 * tmp139 tmp141 = tmp138 + tmp140 tmp142 = tmp63 - tmp43 tmp143 = tmp42 * tmp142 tmp144 = tmp141 + tmp143 tmp145 = tmp133 + tmp144 tmp146 = tmp63 - tmp48 tmp147 = tmp47 * tmp146 tmp148 = tmp63 - tmp51 tmp149 = tmp50 * tmp148 tmp150 = tmp147 + tmp149 tmp151 = tmp63 - tmp55 tmp152 = tmp54 * tmp151 tmp153 = tmp150 + tmp152 tmp154 = tmp63 - tmp59 tmp155 = tmp58 * tmp154 tmp156 = tmp153 + tmp155 tmp157 = tmp145 + tmp156 tmp158 = 0.0 tmp159 = tmp62 + tmp158 tmp160 = 2.0 tmp161 = tmp159 * tmp160 tmp162 = tmp110 * tmp63 tmp163 = tmp161 + tmp162 tmp164 = tmp163 + tmp157 tmp165 = 1e-06 tmp166 = tmp164 + tmp165 tmp167 = tmp161 / tmp166 tmp168 = tmp63 - tmp167 tmp169 = tl.broadcast_to(tmp168, [XBLOCK, RBLOCK]) tmp171 = tl.sum(tmp169, 1)[:, None] tmp172 = 16.0 tmp173 = tmp171 / tmp172 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp173, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [tp, tp_1, tp_2, tp_3, mul_3, mul_4, sub, fn, fn_1, fn_2, mul_5, add, sub_1, fp, fp_1, fp_2, add_1, add_2, truediv, result, result_1], Original ATen: [aten.mul, aten.sum, aten.add, aten.rsub, aten.div, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_div_mean_mul_rsub_sum_0.run(buf4, arg1_1, arg0_1, 1, 16, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from typing import Union from typing import Optional from typing import Iterable from torch import nn class FScoreLoss(nn.modules.loss._WeightedLoss): """Uses the 1 - F-score as a loss. .. math:: F = \\frac{ (1 + \\beta^2) TP }{ (1 + \\beta^2) TP + \\beta^2 FN + FP } Args: beta: The beta in the above formula. eps: Epsilon for numerical stability. tp_bias: This is added to the TP count. Should add a little stability for very small structures. make_onehot: Convert the target segmentation to onehot internally. Turn this off if the target is already onehot. make_onehot_newaxis: 'newaxis' argument for the onehot conversion. ignore_index: These classes will not contribute to the loss. Has no effect if reduction is "none". weight: Weights for the different classes. Has no effect if reduction is "none". reduction: "mean", "sum", or "none". """ def __init__(self, beta: 'float'=1.0, eps: 'float'=1e-06, tp_bias: 'Union[int, float]'=0, make_onehot: 'bool'=False, make_onehot_newaxis: 'bool'=False, ignore_index: 'Optional[Union[int, Iterable[int]]]'=None, weight: 'Optional[torch.Tensor]'=None, reduction='mean', **kwargs): super(FScoreLoss, self).__init__(weight=weight, reduction=reduction) self.beta = beta self.eps = eps self.tp_bias = tp_bias self.make_onehot = make_onehot self.make_onehot_newaxis = make_onehot_newaxis self.ignore_index = ignore_index if isinstance(ignore_index, int): self.ignore_index = [ignore_index] def forward(self, input_: 'torch.Tensor', target: 'torch.Tensor' ) ->torch.Tensor: """Forward pass through the loss. Args: input_: Input with expected shape (B, C, ...) with C the number of classes. target: Target, either onehot with shape (B, C, ...), or not with shape either (B, ...) or (B, 1, ...). Make sure to set the make_onehot and make_onehot_newaxis arguments appropriately. Returns: The loss. """ assert not target.requires_grad if self.make_onehot: target = make_onehot_segmentation(target, range(input_.shape[1] ), newaxis=self.make_onehot_newaxis) target = target.float() tp = input_ * target fn = (1 - input_) * target fp = input_ * (1 - target) while tp.dim() > 2: tp = tp.sum(-1, keepdim=False) fn = fn.sum(-1, keepdim=False) fp = fp.sum(-1, keepdim=False) tp += self.tp_bias result = 1 - (1 + self.beta * self.beta) * tp / ((1 + self.beta * self.beta) * tp + self.beta * self.beta * fn + fp + self.eps) if self.reduction != 'none': if self.weight is not None: self.weight = self.weight.float() self.weight = self.weight weight = self.weight.expand_as(result) result = result * weight if self.ignore_index is not None: for cls in sorted(self.ignore_index, reverse=True): if cls == result.shape[1] - 1: result = result[:, :-1] elif cls == 0: result = result[:, 1:] else: result = torch.cat([result[:, :cls], result[:, cls + 1:]], 1) if self.reduction == 'mean': result = torch.mean(result) elif self.reduction == 'sum': result = torch.sum(result) else: raise ValueError( "reduction must be 'none', 'mean' or 'sum', but is {}". format(self.reduction)) return result def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from typing import Union from typing import Optional from typing import Iterable from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mean_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 16 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 16 * r0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 16 * r0), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 16 * r0), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 16 * r0), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 16 * r0), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 16 * r0), None, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 16 * r0), None, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (4 + 16 * r0), None, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr1 + (4 + 16 * r0), None, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (5 + 16 * r0), None, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (5 + 16 * r0), None, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr0 + (6 + 16 * r0), None, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr1 + (6 + 16 * r0), None, eviction_policy='evict_last' ) tmp26 = tl.load(in_ptr0 + (7 + 16 * r0), None, eviction_policy='evict_last' ) tmp27 = tl.load(in_ptr1 + (7 + 16 * r0), None, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr0 + (8 + 16 * r0), None, eviction_policy='evict_last' ) tmp32 = tl.load(in_ptr1 + (8 + 16 * r0), None, eviction_policy='evict_last' ) tmp34 = tl.load(in_ptr0 + (9 + 16 * r0), None, eviction_policy='evict_last' ) tmp35 = tl.load(in_ptr1 + (9 + 16 * r0), None, eviction_policy='evict_last' ) tmp38 = tl.load(in_ptr0 + (10 + 16 * r0), None, eviction_policy= 'evict_last') tmp39 = tl.load(in_ptr1 + (10 + 16 * r0), None, eviction_policy= 'evict_last') tmp42 = tl.load(in_ptr0 + (11 + 16 * r0), None, eviction_policy= 'evict_last') tmp43 = tl.load(in_ptr1 + (11 + 16 * r0), None, eviction_policy= 'evict_last') tmp47 = tl.load(in_ptr0 + (12 + 16 * r0), None, eviction_policy= 'evict_last') tmp48 = tl.load(in_ptr1 + (12 + 16 * r0), None, eviction_policy= 'evict_last') tmp50 = tl.load(in_ptr0 + (13 + 16 * r0), None, eviction_policy= 'evict_last') tmp51 = tl.load(in_ptr1 + (13 + 16 * r0), None, eviction_policy= 'evict_last') tmp54 = tl.load(in_ptr0 + (14 + 16 * r0), None, eviction_policy= 'evict_last') tmp55 = tl.load(in_ptr1 + (14 + 16 * r0), None, eviction_policy= 'evict_last') tmp58 = tl.load(in_ptr0 + (15 + 16 * r0), None, eviction_policy= 'evict_last') tmp59 = tl.load(in_ptr1 + (15 + 16 * r0), None, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp17 = tmp15 * tmp16 tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tmp24 = tmp22 * tmp23 tmp25 = tmp21 + tmp24 tmp28 = tmp26 * tmp27 tmp29 = tmp25 + tmp28 
tmp30 = tmp14 + tmp29 tmp33 = tmp31 * tmp32 tmp36 = tmp34 * tmp35 tmp37 = tmp33 + tmp36 tmp40 = tmp38 * tmp39 tmp41 = tmp37 + tmp40 tmp44 = tmp42 * tmp43 tmp45 = tmp41 + tmp44 tmp46 = tmp30 + tmp45 tmp49 = tmp47 * tmp48 tmp52 = tmp50 * tmp51 tmp53 = tmp49 + tmp52 tmp56 = tmp54 * tmp55 tmp57 = tmp53 + tmp56 tmp60 = tmp58 * tmp59 tmp61 = tmp57 + tmp60 tmp62 = tmp46 + tmp61 tmp63 = 1.0 tmp64 = tmp63 - tmp0 tmp65 = tmp64 * tmp1 tmp66 = tmp63 - tmp3 tmp67 = tmp66 * tmp4 tmp68 = tmp65 + tmp67 tmp69 = tmp63 - tmp7 tmp70 = tmp69 * tmp8 tmp71 = tmp68 + tmp70 tmp72 = tmp63 - tmp11 tmp73 = tmp72 * tmp12 tmp74 = tmp71 + tmp73 tmp75 = tmp63 - tmp15 tmp76 = tmp75 * tmp16 tmp77 = tmp63 - tmp18 tmp78 = tmp77 * tmp19 tmp79 = tmp76 + tmp78 tmp80 = tmp63 - tmp22 tmp81 = tmp80 * tmp23 tmp82 = tmp79 + tmp81 tmp83 = tmp63 - tmp26 tmp84 = tmp83 * tmp27 tmp85 = tmp82 + tmp84 tmp86 = tmp74 + tmp85 tmp87 = tmp63 - tmp31 tmp88 = tmp87 * tmp32 tmp89 = tmp63 - tmp34 tmp90 = tmp89 * tmp35 tmp91 = tmp88 + tmp90 tmp92 = tmp63 - tmp38 tmp93 = tmp92 * tmp39 tmp94 = tmp91 + tmp93 tmp95 = tmp63 - tmp42 tmp96 = tmp95 * tmp43 tmp97 = tmp94 + tmp96 tmp98 = tmp86 + tmp97 tmp99 = tmp63 - tmp47 tmp100 = tmp99 * tmp48 tmp101 = tmp63 - tmp50 tmp102 = tmp101 * tmp51 tmp103 = tmp100 + tmp102 tmp104 = tmp63 - tmp54 tmp105 = tmp104 * tmp55 tmp106 = tmp103 + tmp105 tmp107 = tmp63 - tmp58 tmp108 = tmp107 * tmp59 tmp109 = tmp106 + tmp108 tmp110 = tmp98 + tmp109 tmp111 = tmp63 - tmp1 tmp112 = tmp0 * tmp111 tmp113 = tmp63 - tmp4 tmp114 = tmp3 * tmp113 tmp115 = tmp112 + tmp114 tmp116 = tmp63 - tmp8 tmp117 = tmp7 * tmp116 tmp118 = tmp115 + tmp117 tmp119 = tmp63 - tmp12 tmp120 = tmp11 * tmp119 tmp121 = tmp118 + tmp120 tmp122 = tmp63 - tmp16 tmp123 = tmp15 * tmp122 tmp124 = tmp63 - tmp19 tmp125 = tmp18 * tmp124 tmp126 = tmp123 + tmp125 tmp127 = tmp63 - tmp23 tmp128 = tmp22 * tmp127 tmp129 = tmp126 + tmp128 tmp130 = tmp63 - tmp27 tmp131 = tmp26 * tmp130 tmp132 = tmp129 + tmp131 tmp133 = tmp121 + tmp132 tmp134 = tmp63 - tmp32 tmp135 = tmp31 * tmp134 tmp136 = tmp63 - tmp35 tmp137 = tmp34 * tmp136 tmp138 = tmp135 + tmp137 tmp139 = tmp63 - tmp39 tmp140 = tmp38 * tmp139 tmp141 = tmp138 + tmp140 tmp142 = tmp63 - tmp43 tmp143 = tmp42 * tmp142 tmp144 = tmp141 + tmp143 tmp145 = tmp133 + tmp144 tmp146 = tmp63 - tmp48 tmp147 = tmp47 * tmp146 tmp148 = tmp63 - tmp51 tmp149 = tmp50 * tmp148 tmp150 = tmp147 + tmp149 tmp151 = tmp63 - tmp55 tmp152 = tmp54 * tmp151 tmp153 = tmp150 + tmp152 tmp154 = tmp63 - tmp59 tmp155 = tmp58 * tmp154 tmp156 = tmp153 + tmp155 tmp157 = tmp145 + tmp156 tmp158 = 0.0 tmp159 = tmp62 + tmp158 tmp160 = 2.0 tmp161 = tmp159 * tmp160 tmp162 = tmp110 * tmp63 tmp163 = tmp161 + tmp162 tmp164 = tmp163 + tmp157 tmp165 = 1e-06 tmp166 = tmp164 + tmp165 tmp167 = tmp161 / tmp166 tmp168 = tmp63 - tmp167 tmp169 = tl.broadcast_to(tmp168, [XBLOCK, RBLOCK]) tmp171 = tl.sum(tmp169, 1)[:, None] tmp172 = 16.0 tmp173 = tmp171 / tmp172 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp173, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 get_raw_stream(0) triton_per_fused_add_div_mean_mul_rsub_sum_0[grid(1)](buf4, arg1_1, arg0_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf4, class FScoreLossNew(nn.modules.loss._WeightedLoss): """Uses the 1 - F-score 
as a loss. .. math:: F = \\frac{ (1 + \\beta^2) TP }{ (1 + \\beta^2) TP + \\beta^2 FN + FP } Args: beta: The beta in the above formula. eps: Epsilon for numerical stability. tp_bias: This is added to the TP count. Should add a little stability for very small structures. make_onehot: Convert the target segmentation to onehot internally. Turn this off if the target is already onehot. make_onehot_newaxis: 'newaxis' argument for the onehot conversion. ignore_index: These classes will not contribute to the loss. Has no effect if reduction is "none". weight: Weights for the different classes. Has no effect if reduction is "none". reduction: "mean", "sum", or "none". """ def __init__(self, beta: 'float'=1.0, eps: 'float'=1e-06, tp_bias: 'Union[int, float]'=0, make_onehot: 'bool'=False, make_onehot_newaxis: 'bool'=False, ignore_index: 'Optional[Union[int, Iterable[int]]]'=None, weight: 'Optional[torch.Tensor]'=None, reduction='mean', **kwargs): super(FScoreLossNew, self).__init__(weight=weight, reduction=reduction) self.beta = beta self.eps = eps self.tp_bias = tp_bias self.make_onehot = make_onehot self.make_onehot_newaxis = make_onehot_newaxis self.ignore_index = ignore_index if isinstance(ignore_index, int): self.ignore_index = [ignore_index] def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
MIC-DKFZ/image-time-series
FScoreLoss
false
5,579
[ "MIT" ]
1
0480d5cb6936c7d9e839b6741f18c10893d78d8a
https://github.com/MIC-DKFZ/image-time-series/tree/0480d5cb6936c7d9e839b6741f18c10893d78d8a
import torch from typing import Union from typing import Optional from typing import Iterable from torch import nn class Model(nn.modules.loss._WeightedLoss): """Uses the 1 - F-score as a loss. .. math:: F = \frac{ (1 + \beta^2) TP }{ (1 + \beta^2) TP + \beta^2 FN + FP } Args: beta: The beta in the above formula. eps: Epsilon for numerical stability. tp_bias: This is added to the TP count. Should add a little stability for very small structures. make_onehot: Convert the target segmentation to onehot internally. Turn this off if the target is already onehot. make_onehot_newaxis: 'newaxis' argument for the onehot conversion. ignore_index: These classes will not contribute to the loss. Has no effect if reduction is "none". weight: Weights for the different classes. Has no effect if reduction is "none". reduction: "mean", "sum", or "none". """ def __init__(self, beta: 'float'=1.0, eps: 'float'=1e-06, tp_bias: 'Union[int, float]'=0, make_onehot: 'bool'=False, make_onehot_newaxis: 'bool'=False, ignore_index: 'Optional[Union[int, Iterable[int]]]'=None, weight: 'Optional[torch.Tensor]'=None, reduction='mean', **kwargs): super().__init__(weight=weight, reduction=reduction) self.beta = beta self.eps = eps self.tp_bias = tp_bias self.make_onehot = make_onehot self.make_onehot_newaxis = make_onehot_newaxis self.ignore_index = ignore_index if isinstance(ignore_index, int): self.ignore_index = [ignore_index] def forward(self, input_: 'torch.Tensor', target: 'torch.Tensor' ) ->torch.Tensor: """Forward pass through the loss. Args: input_: Input with expected shape (B, C, ...) with C the number of classes. target: Target, either onehot with shape (B, C, ...), or not with shape either (B, ...) or (B, 1, ...). Make sure to set the make_onehot and make_onehot_newaxis arguments appropriately. Returns: The loss. """ assert not target.requires_grad if self.make_onehot: target = make_onehot_segmentation(target, range(input_.shape[1] ), newaxis=self.make_onehot_newaxis) target = target.float() tp = input_ * target fn = (1 - input_) * target fp = input_ * (1 - target) while tp.dim() > 2: tp = tp.sum(-1, keepdim=False) fn = fn.sum(-1, keepdim=False) fp = fp.sum(-1, keepdim=False) tp += self.tp_bias result = 1 - (1 + self.beta * self.beta) * tp / ((1 + self.beta * self.beta) * tp + self.beta * self.beta * fn + fp + self.eps) if self.reduction != 'none': if self.weight is not None: self.weight = self.weight.float() self.weight = self.weight weight = self.weight.expand_as(result) result = result * weight if self.ignore_index is not None: for cls in sorted(self.ignore_index, reverse=True): if cls == result.shape[1] - 1: result = result[:, :-1] elif cls == 0: result = result[:, 1:] else: result = torch.cat([result[:, :cls], result[:, cls + 1:]], 1) if self.reduction == 'mean': result = torch.mean(result) elif self.reduction == 'sum': result = torch.sum(result) else: raise ValueError( "reduction must be 'none', 'mean' or 'sum', but is {}". format(self.reduction)) return result def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_i # ... truncated (>4000 chars) for memory efficiency
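As orientation for this row: with beta = 1 the loss above is exactly 1 minus the soft Dice/F1 score. A minimal eager-mode sanity check could look like the following (illustrative only, not part of the dataset row; it assumes the `Model` class defined above is in scope and runs on CPU):

import torch

# Soft predictions in [0, 1] and a binary target, shaped like get_inputs().
pred = torch.rand(4, 4, 4, 4)
target = (torch.rand(4, 4, 4, 4) > 0.5).float()

# With the defaults (beta=1.0, reduction='mean') this is 1 - mean Dice
# over the (batch, class) grid left after summing out the spatial axes.
loss = Model()(pred, target)
assert loss.dim() == 0  # scalar after the mean reduction
print(loss.item())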
Conv_Block_gn
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/aq/caqnmb3rs4apw4atzgbt3bm7z3qshygxt3ddekkomaxcoiuso5oy.py # Topologically Sorted Source Nodes: [x, x_2], Original ATen: [aten.convolution, aten.native_group_norm] # Source node to ATen node mapping: # x => convolution # x_2 => add, add_1, mul_2, rsqrt, var_mean # Graph fragment: # %convolution : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %unsqueeze_2), kwargs = {}) triton_per_fused_convolution_native_group_norm_0 = async_compile.triton('triton_per_fused_convolution_native_group_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_convolution_native_group_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 
'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_convolution_native_group_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 36 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r3 = rindex x0 = xindex r2 = (rindex // 9) tmp0 = tl.load(in_out_ptr0 + (r3 + (36*x0)), rmask & xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (r2), rmask, eviction_policy='evict_last', other=0.0) tmp31 = tl.load(in_ptr1 + (r2), rmask, eviction_policy='evict_last', other=0.0) tmp33 = tl.load(in_ptr2 + (r2), rmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.where(rmask & xmask, tmp8, 0) tmp11 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp13 = tl.where(rmask & xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tl.full([XBLOCK, 1], 36, tl.int32) tmp16 = tmp15.to(tl.float32) tmp17 = tmp14 / tmp16 tmp18 = tmp8 - tmp17 tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.where(rmask & xmask, tmp20, 0) tmp23 = tl.sum(tmp22, 1)[:, None] tmp24 = tmp7 - tmp17 tmp25 = 36.0 tmp26 = tmp23 / tmp25 tmp27 = 1e-05 tmp28 = tmp26 + tmp27 tmp29 = libdevice.rsqrt(tmp28) tmp30 = tmp24 * tmp29 tmp32 = tmp30 * tmp31 tmp34 = tmp32 + tmp33 tl.store(in_out_ptr0 + (r3 + (36*x0)), tmp2, rmask & xmask) tl.store(out_ptr2 + (r3 + (36*x0)), tmp34, rmask & xmask) tl.store(out_ptr3 + (x0), tmp29, xmask) tl.store(out_ptr0 + (x0), tmp17, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf5 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) buf6 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) # Topologically Sorted Source Nodes: [x, x_2], Original ATen: [aten.convolution, aten.native_group_norm] stream0 = get_raw_stream(0) triton_per_fused_convolution_native_group_norm_0.run(buf1, primals_2, primals_4, primals_5, buf2, buf5, buf6, 4, 36, grid=grid(4), stream=stream0) del primals_2 del primals_5 
return (buf5, primals_1, primals_3, primals_4, buf1, reinterpret_tensor(buf2, (4, 1), (1, 1), 0), reinterpret_tensor(buf6, (4, 1), (1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torch.autograd.variable import * class Conv_Block_gn(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, groups, stride=1 ): super(Conv_Block_gn, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size= kernel_size, stride=stride, padding=1) self.relu = torch.nn.LeakyReLU() self.bn = nn.GroupNorm(groups, out_channels) def forward(self, x): x = self.conv(x) x = self.relu(x) x = self.bn(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4, 'groups': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.autograd.variable import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_convolution_native_group_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 36 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r3 = rindex x0 = xindex r2 = rindex // 9 tmp0 = tl.load(in_out_ptr0 + (r3 + 36 * x0), rmask & xmask, other=0.0) tmp1 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_last', other=0.0 ) tmp31 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_last', other=0.0) tmp33 = tl.load(in_ptr2 + r2, rmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tl.where(rmask & xmask, tmp8, 0) tmp11 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp13 = tl.where(rmask & xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tl.full([XBLOCK, 1], 36, tl.int32) tmp16 = tmp15.to(tl.float32) tmp17 = tmp14 / tmp16 tmp18 = tmp8 - tmp17 tmp19 = tmp18 * tmp18 tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK]) tmp22 = tl.where(rmask & xmask, tmp20, 0) tmp23 = tl.sum(tmp22, 1)[:, None] tmp24 = tmp7 - tmp17 tmp25 = 36.0 tmp26 = tmp23 / tmp25 tmp27 = 1e-05 tmp28 = tmp26 + tmp27 tmp29 = libdevice.rsqrt(tmp28) tmp30 = tmp24 * tmp29 tmp32 = tmp30 * tmp31 tmp34 = tmp32 + tmp33 tl.store(in_out_ptr0 + (r3 + 36 * x0), tmp2, rmask & xmask) tl.store(out_ptr2 + (r3 + 36 * x0), tmp34, rmask & xmask) tl.store(out_ptr3 + x0, tmp29, xmask) tl.store(out_ptr0 + x0, tmp17, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf5 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) buf6 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) get_raw_stream(0) triton_per_fused_convolution_native_group_norm_0[grid(4)](buf1, primals_2, primals_4, primals_5, buf2, buf5, buf6, 4, 36, XBLOCK=1, num_warps=2, num_stages=1) del primals_2 del primals_5 return buf5, primals_1, primals_3, primals_4, buf1, reinterpret_tensor(buf2 , (4, 1), (1, 1), 0), reinterpret_tensor(buf6, (4, 1), (1, 1), 0) class Conv_Block_gnNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, groups, 
stride=1 ): super(Conv_Block_gnNew, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size= kernel_size, stride=stride, padding=1) self.relu = torch.nn.LeakyReLU() self.bn = nn.GroupNorm(groups, out_channels) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_4 = self.bn.weight primals_5 = self.bn.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
MRLoghmani/Separate_to_Adapt
Conv_Block_gn
false
5,580
[ "MIT" ]
1
09c734448aa22b3879186f59952d9fd596d4a1f8
https://github.com/MRLoghmani/Separate_to_Adapt/tree/09c734448aa22b3879186f59952d9fd596d4a1f8
import torch import torch.nn as nn from torch.autograd.variable import * class Model(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, groups, stride=1 ): super().__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size= kernel_size, stride=stride, padding=1) self.relu = torch.nn.LeakyReLU() self.bn = nn.GroupNorm(groups, out_channels) def forward(self, x): x = self.conv(x) x = self.relu(x) x = self.bn(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4, 'groups': 1}]
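For readers spot-checking rows like this one, a hypothetical parity test between the eager module and the compiled `call` wrapper might look as follows (assumes a CUDA device and that `Conv_Block_gn` and `call` from this entry are in scope; the argument order mirrors `Conv_Block_gnNew.forward`, and the tolerances are a guess to absorb differing reduction orders in the group-norm statistics):

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    m = Conv_Block_gn(in_channels=4, out_channels=4, kernel_size=4, groups=1).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = m(x)  # conv -> LeakyReLU -> GroupNorm, output shape (4, 4, 3, 3)
    # call() consumes (and clears) the list it is given, so build it inline.
    out = call([m.conv.weight, m.conv.bias, x, m.bn.weight, m.bn.bias])[0]
    torch.testing.assert_close(out, ref, rtol=1e-4, atol=1e-4)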
FCNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/bm/cbmd63mrouqmm2pha5x6evse3dkbpy5o4xnk5v7quflfkqfdvwck.py # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # output_1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex 
x0 = xindex % 5 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (5, 4), (4, 1)) assert_size_stride(primals_2, (5, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 5), (5, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 5), (5, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 5), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 5), (80, 20, 5, 1), 0); del buf0 # reuse buf3 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.bool) # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf3, 320, grid=grid(320), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 5), (5, 1), 0), reinterpret_tensor(primals_4, (5, 4), (1, 5), 0), alpha=1, beta=1, out=buf2) del primals_5 return (reinterpret_tensor(buf2, (256, ), (1, ), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 5), (5, 1), 0), primals_4, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((5, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((5, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 5), (5, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data from typing import * class FCNet(nn.Module): def __init__(self, input_size, output_size): super().__init__() self.l1 = nn.Linear(input_size, 5) self.relu = nn.ReLU() self.l2 = nn.Linear(5, output_size) def forward(self, x): output = self.l1(x) output = self.relu(output) output = self.l2(output) return output.view(-1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data from typing import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 5 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (5, 4), (4, 1)) assert_size_stride(primals_2, (5,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 5), (5, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 5), (5, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 5), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 5), (80, 20, 5, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(320)](buf1, primals_2, buf3, 320, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 5), ( 5, 1), 0), reinterpret_tensor(primals_4, (5, 4), (1, 5), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (256,), (1,), 0), reinterpret_tensor( primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 5), ( 5, 1), 0), primals_4, buf3 class FCNetNew(nn.Module): def __init__(self, input_size, output_size): super().__init__() self.l1 = nn.Linear(input_size, 5) self.relu = nn.ReLU() self.l2 = nn.Linear(5, output_size) def forward(self, input_0): primals_1 = self.l1.weight primals_2 = self.l1.bias primals_4 = self.l2.weight primals_5 = self.l2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Markus92/nni
FCNet
false
5,581
[ "MIT" ]
1
2641c7343f4b411b002bea4f5648941268194ed7
https://github.com/Markus92/nni/tree/2641c7343f4b411b002bea4f5648941268194ed7
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data from typing import * class Model(nn.Module): def __init__(self, input_size, output_size): super().__init__() self.l1 = nn.Linear(input_size, 5) self.relu = nn.ReLU() self.l2 = nn.Linear(5, output_size) def forward(self, x): output = self.l1(x) output = self.relu(output) output = self.l2(output) return output.view(-1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
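One note on the row above: the trailing `.view(-1)` is what lets the compiled graph hand back a flat `(256,)` buffer instead of the 4-D activation. A small illustrative usage (CPU-only; assumes the `FCNet` class from this entry is in scope):

import torch

net = FCNet(input_size=4, output_size=4)
y = net(torch.rand(4, 4, 4, 4))
# The Linear layers act on the last dim: (4, 4, 4, 4) -> (4, 4, 4, 5) -> (4, 4, 4, 4),
# then view(-1) flattens the 4*4*4*4 elements into a single vector.
assert y.shape == (256,)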
PFLDLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/nx/cnxzajv6akmfdny3r4ud7m5las64tc7zew2ydgeuizr3yk7g7cfn.py # Topologically Sorted Source Nodes: [sub, cos, sub_1, weight_angle, sub_2, pow_1, l2_distant, mul, mean, mean_1], Original ATen: [aten.sub, aten.cos, aten.rsub, aten.sum, aten.pow, aten.mul, aten.mean] # Source node to ATen node mapping: # cos => cos # l2_distant => sum_2 # mean => mean # mean_1 => mean_1 # mul => mul # pow_1 => pow_1 # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # weight_angle => sum_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %cos : [num_users=1] = call_function[target=torch.ops.aten.cos.default](args = (%sub,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %cos), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub_1, [1]), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg2_1, %arg3_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {}) # %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, %sum_2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul,), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_2,), kwargs = {}) triton_per_fused_cos_mean_mul_pow_rsub_sub_sum_0 = async_compile.triton('triton_per_fused_cos_mean_mul_pow_rsub_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: 
'*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {6: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=(6,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cos_mean_mul_pow_rsub_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_cos_mean_mul_pow_rsub_sub_sum_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), None) tmp4 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp5 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None) tmp9 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp10 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None) tmp14 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp15 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None) tmp19 = tl.load(in_ptr2 + (r0 + (64*r1)), None) tmp20 = tl.load(in_ptr3 + (r0 + (64*r1)), None) tmp25 = tl.load(in_ptr2 + (16 + r0 + (64*r1)), None) tmp26 = tl.load(in_ptr3 + (16 + r0 + (64*r1)), None) tmp31 = tl.load(in_ptr2 + (32 + r0 + (64*r1)), None) tmp32 = tl.load(in_ptr3 + (32 + r0 + (64*r1)), None) tmp37 = tl.load(in_ptr2 + (48 + r0 + (64*r1)), None) tmp38 = tl.load(in_ptr3 + (48 + r0 + (64*r1)), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp21 = tmp19 - tmp20 tmp22 = tl_math.cos(tmp21) tmp23 = 1.0 tmp24 = tmp23 - tmp22 tmp27 = tmp25 - tmp26 tmp28 = tl_math.cos(tmp27) tmp29 = tmp23 - tmp28 tmp30 = tmp24 + tmp29 tmp33 = tmp31 - tmp32 tmp34 = tl_math.cos(tmp33) tmp35 = tmp23 - tmp34 tmp36 = tmp30 + tmp35 tmp39 = tmp37 - tmp38 tmp40 = tl_math.cos(tmp39) tmp41 = tmp23 - tmp40 tmp42 = tmp36 + tmp41 tmp43 = tmp42 * tmp18 tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK]) tmp46 = tl.sum(tmp44, 1)[:, None] tmp47 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp49 = tl.sum(tmp47, 1)[:, None] tmp50 = 64.0 tmp51 = tmp46 / tmp50 tmp52 = tmp49 / tmp50 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp51, None) tl.debug_barrier() tl.store(in_out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp52, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 
1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf2; del buf2 # reuse buf5 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [sub, cos, sub_1, weight_angle, sub_2, pow_1, l2_distant, mul, mean, mean_1], Original ATen: [aten.sub, aten.cos, aten.rsub, aten.sum, aten.pow, aten.mul, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_cos_mean_mul_pow_rsub_sub_sum_0.run(buf4, buf5, arg2_1, arg3_1, arg0_1, arg1_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return (buf4, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data from typing import * class PFLDLoss(nn.Module): """Weighted loss of L2 distance with the pose angle for PFLD.""" def __init__(self): super(PFLDLoss, self).__init__() def forward(self, landmark_gt, euler_angle_gt, angle, landmarks): """ Calculate weighted L2 loss for PFLD. Parameters ---------- landmark_gt : tensor the ground truth of landmarks euler_angle_gt : tensor the ground truth of pose angle angle : tensor the predicted pose angle landmarks : float32 the predicted landmarks Returns ------- output: tensor the weighted L2 loss output: tensor the normal L2 loss """ weight_angle = torch.sum(1 - torch.cos(angle - euler_angle_gt), axis=1) l2_distant = torch.sum((landmark_gt - landmarks) ** 2, axis=1) return torch.mean(weight_angle * l2_distant), torch.mean(l2_distant) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data from typing import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_cos_mean_mul_pow_rsub_sub_sum_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp10 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp19 = tl.load(in_ptr2 + (r0 + 64 * r1), None) tmp20 = tl.load(in_ptr3 + (r0 + 64 * r1), None) tmp25 = tl.load(in_ptr2 + (16 + r0 + 64 * r1), None) tmp26 = tl.load(in_ptr3 + (16 + r0 + 64 * r1), None) tmp31 = tl.load(in_ptr2 + (32 + r0 + 64 * r1), None) tmp32 = tl.load(in_ptr3 + (32 + r0 + 64 * r1), None) tmp37 = tl.load(in_ptr2 + (48 + r0 + 64 * r1), None) tmp38 = tl.load(in_ptr3 + (48 + r0 + 64 * r1), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp21 = tmp19 - tmp20 tmp22 = tl_math.cos(tmp21) tmp23 = 1.0 tmp24 = tmp23 - tmp22 tmp27 = tmp25 - tmp26 tmp28 = tl_math.cos(tmp27) tmp29 = tmp23 - tmp28 tmp30 = tmp24 + tmp29 tmp33 = tmp31 - tmp32 tmp34 = tl_math.cos(tmp33) tmp35 = tmp23 - tmp34 tmp36 = tmp30 + tmp35 tmp39 = tmp37 - tmp38 tmp40 = tl_math.cos(tmp39) tmp41 = tmp23 - tmp40 tmp42 = tmp36 + tmp41 tmp43 = tmp42 * tmp18 tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK]) tmp46 = tl.sum(tmp44, 1)[:, None] tmp47 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp49 = tl.sum(tmp47, 1)[:, None] tmp50 = 64.0 tmp51 = tmp46 / tmp50 tmp52 = tmp49 / tmp50 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp51, None) tl.debug_barrier() tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp52, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf2 del buf2 buf5 = buf3 del buf3 get_raw_stream(0) triton_per_fused_cos_mean_mul_pow_rsub_sub_sum_0[grid(1)](buf4, buf5, arg2_1, arg3_1, arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return buf4, buf5 class PFLDLossNew(nn.Module): """Weighted loss of L2 distance with the pose angle for 
PFLD.""" def __init__(self): super(PFLDLossNew, self).__init__() def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0], output[1]
Markus92/nni
PFLDLoss
false
5,582
[ "MIT" ]
1
2641c7343f4b411b002bea4f5648941268194ed7
https://github.com/Markus92/nni/tree/2641c7343f4b411b002bea4f5648941268194ed7
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data from typing import * class Model(nn.Module): """Weighted loss of L2 distance with the pose angle for PFLD.""" def __init__(self): super().__init__() def forward(self, landmark_gt, euler_angle_gt, angle, landmarks): """ Calculate weighted L2 loss for PFLD. Parameters ---------- landmark_gt : tensor the ground truth of landmarks euler_angle_gt : tensor the ground truth of pose angle angle : tensor the predicted pose angle landmarks : float32 the predicted landmarks Returns ------- output: tensor the weighted L2 loss output: tensor the normal L2 loss """ weight_angle = torch.sum(1 - torch.cos(angle - euler_angle_gt), axis=1) l2_distant = torch.sum((landmark_gt - landmarks) ** 2, axis=1) return torch.mean(weight_angle * l2_distant), torch.mean(l2_distant) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
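A quick behavioral check of the row above (illustrative, CPU-only, assuming the `PFLDLoss` class from this entry): when the predicted angles equal the ground-truth angles, `1 - cos(0)` is exactly zero, so the weighted term vanishes while the plain L2 term does not.

import torch

crit = PFLDLoss()
landmark_gt = torch.rand(4, 4, 4, 4)
angle = torch.rand(4, 4, 4, 4)       # reused as euler_angle_gt below
landmarks = torch.rand(4, 4, 4, 4)

weighted, plain = crit(landmark_gt, angle, angle, landmarks)
assert weighted.item() == 0.0  # cos(0.0) == 1.0 exactly in float32
assert plain.item() > 0.0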
VarifocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ax/caxbx6i4bwtsbtfhik5fb25fzuvlc7qeuptufx227j5xebtapxg3.py # Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, gt, float_1, mul, pred_sigmoid, sub, abs_1, pow_1, mul_1, le, float_2, mul_2, focal_weight, loss, loss_1, loss_cls], Original ATen: [aten.binary_cross_entropy_with_logits, aten.gt, aten._to_copy, aten.mul, aten.sigmoid, aten.sub, aten.abs, aten.pow, aten.le, aten.add, aten.mean] # Source node to ATen node mapping: # abs_1 => abs_1 # binary_cross_entropy_with_logits => abs_2, exp, full_default, log1p, minimum, mul_3, neg, sub_1, sub_2, sub_3 # float_1 => convert_element_type # float_2 => convert_element_type_1 # focal_weight => add # gt => gt # le => le # loss => mul_4 # loss_1 => mean # loss_cls => mul_5 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # pow_1 => pow_1 # pred_sigmoid => sigmoid # sub => sub # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg0_1), kwargs = {}) # %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_2,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_3, %sub_2), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg1_1, 0.0), kwargs = {}) # %convert_element_type : [num_users=1] = 
call_function[target=torch.ops.prims.convert_element_type.default](args = (%gt, torch.float32), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %convert_element_type), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %arg1_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.75), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%arg1_1, 0.0), kwargs = {}) # %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%le, torch.float32), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %convert_element_type_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_2), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %add), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_4,), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_gt_le_mean_mul_pow_sigmoid_sub_0 = async_compile.triton('triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_gt_le_mean_mul_pow_sigmoid_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_gt_le_mean_mul_pow_sigmoid_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_gt_le_mean_mul_pow_sigmoid_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 
XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tmp0 > tmp5 tmp14 = tmp13.to(tl.float32) tmp15 = tmp0 * tmp14 tmp16 = tl.sigmoid(tmp3) tmp17 = tmp16 - tmp0 tmp18 = tl_math.abs(tmp17) tmp19 = tmp18 * tmp18 tmp20 = 0.75 tmp21 = tmp19 * tmp20 tmp22 = tmp0 <= tmp5 tmp23 = tmp22.to(tl.float32) tmp24 = tmp21 * tmp23 tmp25 = tmp15 + tmp24 tmp26 = tmp12 * tmp25 tmp27 = tl.broadcast_to(tmp26, [RBLOCK]) tmp29 = triton_helpers.promote_to_tensor(tl.sum(tmp27, 0)) tmp30 = 256.0 tmp31 = tmp29 / tmp30 tmp32 = tmp31 * tmp1 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp32, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, gt, float_1, mul, pred_sigmoid, sub, abs_1, pow_1, mul_1, le, float_2, mul_2, focal_weight, loss, loss_1, loss_cls], Original ATen: [aten.binary_cross_entropy_with_logits, aten.gt, aten._to_copy, aten.mul, aten.sigmoid, aten.sub, aten.abs, aten.pow, aten.le, aten.add, aten.mean] stream0 = get_raw_stream(0) triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_gt_le_mean_mul_pow_sigmoid_sub_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Avarage factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def varifocal_loss(pred, target, weight=None, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', avg_factor=None): """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_ Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes target (torch.Tensor): The learning target of the iou-aware classification score with shape (N, C), C is the number of classes. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. alpha (float, optional): A balance factor for the negative part of Varifocal Loss, which is different from the alpha of Focal Loss. Defaults to 0.75. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. iou_weighted (bool, optional): Whether to weight the loss of the positive example with the iou target. Defaults to True. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. """ assert pred.size() == target.size() pred_sigmoid = pred.sigmoid() target = target.type_as(pred) if iou_weighted: focal_weight = target * (target > 0.0).float() + alpha * (pred_sigmoid - target).abs().pow(gamma) * (target <= 0.0).float() else: focal_weight = (target > 0.0).float() + alpha * (pred_sigmoid - target ).abs().pow(gamma) * (target <= 0.0).float() loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none' ) * focal_weight loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss class VarifocalLoss(nn.Module): def __init__(self, use_sigmoid=True, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', loss_weight=1.0): """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_ Args: use_sigmoid (bool, optional): Whether the prediction is used for sigmoid or softmax. Defaults to True. alpha (float, optional): A balance factor for the negative part of Varifocal Loss, which is different from the alpha of Focal Loss. Defaults to 0.75. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. iou_weighted (bool, optional): Whether to weight the loss of the positive examples with the iou target. Defaults to True. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". loss_weight (float, optional): Weight of loss. 
Defaults to 1.0. """ super(VarifocalLoss, self).__init__() assert use_sigmoid is True, 'Only sigmoid varifocal loss supported now.' assert alpha >= 0.0 self.use_sigmoid = use_sigmoid self.alpha = alpha self.gamma = gamma self.iou_weighted = iou_weighted self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Options are "none", "mean" and "sum". Returns: torch.Tensor: The calculated loss """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. reduction) if self.use_sigmoid: loss_cls = self.loss_weight * varifocal_loss(pred, target, weight, alpha=self.alpha, gamma=self.gamma, iou_weighted= self.iou_weighted, reduction=reduction, avg_factor=avg_factor) else: raise NotImplementedError return loss_cls def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_gt_le_mean_mul_pow_sigmoid_sub_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tmp0 > tmp5 tmp14 = tmp13.to(tl.float32) tmp15 = tmp0 * tmp14 tmp16 = tl.sigmoid(tmp3) tmp17 = tmp16 - tmp0 tmp18 = tl_math.abs(tmp17) tmp19 = tmp18 * tmp18 tmp20 = 0.75 tmp21 = tmp19 * tmp20 tmp22 = tmp0 <= tmp5 tmp23 = tmp22.to(tl.float32) tmp24 = tmp21 * tmp23 tmp25 = tmp15 + tmp24 tmp26 = tmp12 * tmp25 tmp27 = tl.broadcast_to(tmp26, [RBLOCK]) tmp29 = triton_helpers.promote_to_tensor(tl.sum(tmp27, 0)) tmp30 = 256.0 tmp31 = tmp29 / tmp30 tmp32 = tmp31 * tmp1 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp32, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_gt_le_mean_mul_pow_sigmoid_sub_0[ grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Avarage factor when computing the mean of losses. Returns: Tensor: Processed loss values. 
""" if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def varifocal_loss(pred, target, weight=None, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', avg_factor=None): """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_ Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes target (torch.Tensor): The learning target of the iou-aware classification score with shape (N, C), C is the number of classes. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. alpha (float, optional): A balance factor for the negative part of Varifocal Loss, which is different from the alpha of Focal Loss. Defaults to 0.75. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. iou_weighted (bool, optional): Whether to weight the loss of the positive example with the iou target. Defaults to True. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. """ assert pred.size() == target.size() pred_sigmoid = pred.sigmoid() target = target.type_as(pred) if iou_weighted: focal_weight = target * (target > 0.0).float() + alpha * (pred_sigmoid - target).abs().pow(gamma) * (target <= 0.0).float() else: focal_weight = (target > 0.0).float() + alpha * (pred_sigmoid - target ).abs().pow(gamma) * (target <= 0.0).float() loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none' ) * focal_weight loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss class VarifocalLossNew(nn.Module): def __init__(self, use_sigmoid=True, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', loss_weight=1.0): """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_ Args: use_sigmoid (bool, optional): Whether the prediction is used for sigmoid or softmax. Defaults to True. alpha (float, optional): A balance factor for the negative part of Varifocal Loss, which is different from the alpha of Focal Loss. Defaults to 0.75. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. iou_weighted (bool, optional): Whether to weight the loss of the positive examples with the iou target. Defaults to True. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". loss_weight (float, optional): Weight of loss. Defaults to 1.0. """ super(VarifocalLossNew, self).__init__() assert use_sigmoid is True, 'Only sigmoid varifocal loss supported now.' assert alpha >= 0.0 self.use_sigmoid = use_sigmoid self.alpha = alpha self.gamma = gamma self.iou_weighted = iou_weighted self.reduction = reduction self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
MatthewInkawhich/object_localization_network
VarifocalLoss
false
5,583
[ "Apache-2.0" ]
1
3fddaacfcef33f03af48b746e95ebd7d74dbb27f
https://github.com/MatthewInkawhich/object_localization_network/tree/3fddaacfcef33f03af48b746e95ebd7d74dbb27f
import torch import torch.nn.functional as F import torch.nn as nn def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def varifocal_loss(pred, target, weight=None, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', avg_factor=None): """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_ Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes target (torch.Tensor): The learning target of the iou-aware classification score with shape (N, C), C is the number of classes. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. alpha (float, optional): A balance factor for the negative part of Varifocal Loss, which is different from the alpha of Focal Loss. Defaults to 0.75. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. iou_weighted (bool, optional): Whether to weight the loss of the positive example with the iou target. Defaults to True. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. """ assert pred.size() == target.size() pred_sigmoid = pred.sigmoid() target = target.type_as(pred) if iou_weighted: focal_weight = target * (target > 0.0).float() + alpha * (pred_sigmoid - target).abs().pow(gamma) * (target <= 0.0).float() else: focal_weight = (target > 0.0).float() + alpha * (pred_sigmoid - target ).abs().pow(gamma) * (target <= 0.0).float() loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none' ) * focal_weight loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss class Model(nn.Module): def __init__(self, use_sigmoid=True, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', loss_weight=1.0): """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_ Args: use_sigmoid (bool, optional): Whether the prediction is used for sigmoid or softmax. Defaults to True. alpha (float, optional): A balance factor for the negative part of Varifocal Loss, which is different from the alpha of Focal Loss. Defaults to 0.75. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. iou_weighted (bool, optional): Whether to weight the loss of the positive examples with the iou target. Defaults to True. # ... truncated (>4000 chars) for memory efficiency
EncoderLayer
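The Inductor '0_forward' dump that follows implements the pre-LN encoder step defined in the eager code further down. As equations, with LN the LayerNorms and MHA the multi-head self-attention:

$$h = x + \mathrm{MHA}(\mathrm{LN}_1(x)), \qquad y = h + \mathrm{FFN}(\mathrm{LN}_2(h)), \qquad \mathrm{FFN}(z) = W_2\,\mathrm{GELU}(W_1 z + b_1) + b_2.$$

No dropout kernels appear in the dump, consistent with the graph having been traced with the dropouts inactive.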
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/wd/cwdz7kqs3uwyg53zsyekt77eye7yjl6v7vulow2q6ni534mkf6zw.py # Topologically Sorted Source Nodes: [y], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # y => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/vs/cvsfvbs4wlaqvwxm3svg65dnhcq336ptudvn6xetnbnrtzj7xssn.py # Topologically Sorted Source Nodes: [y], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # y => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/kd/ckd3lhdfl7shyukzlxpvlilao7e5s7ib63y5ilra2gqzt6ijpwwo.py # Topologically Sorted Source Nodes: [q_2, x], Original ATen: [aten.mul, aten.clone] # Source node to ATen node mapping: # q_2 => mul_2 # x => clone # Graph fragment: # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {}) # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_mul_2 = async_compile.triton('triton_poi_fused_clone_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_mul_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & 
ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/3r/c3rsks6vi53ggj2qfjmhu7vc3vqskqtyr7gc4fdp74wzt6pdrjx4.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone] # Source node to ATen node mapping: # x => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/iv/civdgwzpphyda4rs4fr3g6w25bprv7bn4anqgivrgzavi7xr5pdl.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # x_1 => amax, exp, sub_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_11, [3], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_11, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as 
tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/a4/ca4u6hbohfqkgchihihlu5hrf3vuqm27r2ncsg7xb6g4ikttl2at.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # x_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [3], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/4k/c4kxxzyxk45cygbwnqkt5vb2udxol67wuotkh6zmuwsinb63uprn.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_4 => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), 
tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/pw/cpw6jeu3vr4erot3bahfr3gqwa2vqailcc5xbtuiztd5l6w2zwjm.py # Topologically Sorted Source Nodes: [x_7, y_2], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # x_7 => add_2 # y_2 => var_mean_1 # Graph fragment: # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %view_17), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_2, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_4/inductor_cache/hy/chy77uudjf33jqcobrq24clniixoyn2vucadggy6c6n33mq4favb.py # Topologically Sorted Source Nodes: [x_7, y_2], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # x_7 => add_2 # y_2 => add_3, add_4, mul_3, mul_4, rsqrt_1, sub_2 # Graph fragment: # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %view_17), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_3,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %getitem_3), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_12), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_13), kwargs = {}) triton_poi_fused_add_native_layer_norm_8 = async_compile.triton('triton_poi_fused_add_native_layer_norm_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + 
(x2), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/dp/cdplbbjhtn7wjfs5zbdr7dqzrhv6sxravwmmbhqyrtfejnoccqhe.py # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.gelu] # Source node to ATen node mapping: # x_9 => add_5, erf, mul_5, mul_6, mul_7 # Graph fragment: # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_19, 0.5), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_19, 0.7071067811865476), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_6,), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %add_5), kwargs = {}) triton_poi_fused_gelu_9 = async_compile.triton('triton_poi_fused_gelu_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_gelu_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/5o/c5oq2dd4gv3qfn7dbi4gn37m4q5cqp3syup6457zs5qf2qor5xrv.py # Topologically Sorted Source Nodes: [x_7, x_11], Original ATen: [aten.add] # Source node to ATen node mapping: # x_11 => add_6 # x_7 => add_2 # Graph fragment: # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %view_17), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %view_21), kwargs = {}) triton_poi_fused_add_10 = async_compile.triton('triton_poi_fused_add_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_out_ptr0 + (x2), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, ), (1, )) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4, ), (1, )) assert_size_stride(primals_16, (4, 4), (4, 1)) assert_size_stride(primals_17, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [y], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [y], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, grid=grid(64), stream=stream0) del primals_1 del 
primals_2 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4) buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [q_2, x], Original ATen: [aten.mul, aten.clone] triton_poi_fused_clone_mul_2.run(buf3, primals_5, buf6, 16, 4, grid=grid(16, 4), stream=stream0) del primals_5 buf7 = reinterpret_tensor(buf3, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf4, primals_7, buf7, 16, 4, grid=grid(16, 4), stream=stream0) del primals_7 buf8 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf7, (16, 1, 4), (4, 0, 1), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf8, buf9, 256, grid=grid(256), stream=stream0) buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_5.run(buf9, buf10, 256, grid=grid(256), stream=stream0) del buf9 buf11 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf5, primals_9, buf11, 16, 4, grid=grid(16, 4), stream=stream0) del primals_9 buf12 = reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 0), 0), out=buf12) buf13 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.clone] triton_poi_fused_clone_6.run(buf12, buf13, 16, 4, grid=grid(16, 4), stream=stream0) buf14 = reinterpret_tensor(buf12, (16, 4), (4, 1), 0); del buf12 # reuse # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf14) del primals_11 buf15 = buf1; del buf1 # reuse buf16 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_7, y_2], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_7.run(primals_3, buf14, buf15, buf16, 16, grid=grid(16), stream=stream0) buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_7, y_2], Original ATen: [aten.add, aten.native_layer_norm] 
triton_poi_fused_add_native_layer_norm_8.run(primals_3, buf14, buf15, buf16, primals_12, primals_13, buf17, 64, grid=grid(64), stream=stream0) del buf15 del buf16 del primals_13 buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.addmm] extern_kernels.addmm(primals_15, reinterpret_tensor(buf17, (16, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf18) del primals_15 buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.gelu] triton_poi_fused_gelu_9.run(buf18, buf19, 64, grid=grid(64), stream=stream0) buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf19, (16, 4), (4, 1), 0), reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), out=buf20) buf21 = reinterpret_tensor(buf20, (4, 4, 4), (16, 4, 1), 0); del buf20 # reuse # Topologically Sorted Source Nodes: [x_7, x_11], Original ATen: [aten.add] triton_poi_fused_add_10.run(buf21, primals_3, buf14, primals_17, 64, grid=grid(64), stream=stream0) del primals_17 return (buf21, primals_3, primals_12, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf10, reinterpret_tensor(buf13, (16, 4), (4, 1), 0), buf14, reinterpret_tensor(buf17, (16, 4), (4, 1), 0), buf18, reinterpret_tensor(buf19, (16, 4), (4, 1), 0), primals_16, primals_14, primals_10, reinterpret_tensor(buf11, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 4), 0), primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main 
compiled_module_main('None', benchmark_compiled_module)
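Two decoding notes for the kernel dump above. triton_poi_fused_gelu_9 hard-codes the exact erf-based GELU, the literal 0.7071067811865476 being 1/sqrt(2):

$$\mathrm{GELU}(x) = \tfrac{1}{2}\,x\,\big(1 + \operatorname{erf}(x/\sqrt{2})\big),$$

and triton_poi_fused__softmax_4/_5 are the two passes of the numerically stable softmax over the last dimension, exp(x_i - max_j x_j) followed by normalization with the row sum.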
import torch import torch.nn as nn class FeedForwardNetwork(nn.Module): def __init__(self, hidden_size, ffn_size, dropout_rate): super(FeedForwardNetwork, self).__init__() self.layer1 = nn.Linear(hidden_size, ffn_size) self.gelu = nn.GELU() self.layer2 = nn.Linear(ffn_size, hidden_size) def forward(self, x): x = self.layer1(x) x = self.gelu(x) x = self.layer2(x) return x class MultiHeadAttention(nn.Module): def __init__(self, hidden_size, attention_dropout_rate, num_heads): super(MultiHeadAttention, self).__init__() self.num_heads = num_heads self.att_size = att_size = hidden_size // num_heads self.scale = att_size ** -0.5 self.linear_q = nn.Linear(hidden_size, num_heads * att_size) self.linear_k = nn.Linear(hidden_size, num_heads * att_size) self.linear_v = nn.Linear(hidden_size, num_heads * att_size) self.att_dropout = nn.Dropout(attention_dropout_rate) self.output_layer = nn.Linear(num_heads * att_size, hidden_size) def forward(self, q, k, v, attn_bias=None): orig_q_size = q.size() d_k = self.att_size d_v = self.att_size batch_size = q.size(0) q = self.linear_q(q).view(batch_size, -1, self.num_heads, d_k) k = self.linear_k(k).view(batch_size, -1, self.num_heads, d_k) v = self.linear_v(v).view(batch_size, -1, self.num_heads, d_v) q = q.transpose(1, 2) v = v.transpose(1, 2) k = k.transpose(1, 2).transpose(2, 3) q = q * self.scale x = torch.matmul(q, k) if attn_bias is not None: x = x + attn_bias x = torch.softmax(x, dim=3) x = self.att_dropout(x) x = x.matmul(v) x = x.transpose(1, 2).contiguous() x = x.view(batch_size, -1, self.num_heads * d_v) x = self.output_layer(x) assert x.size() == orig_q_size return x class EncoderLayer(nn.Module): def __init__(self, hidden_size, ffn_size, dropout_rate, attention_dropout_rate, num_heads): super(EncoderLayer, self).__init__() self.self_attention_norm = nn.LayerNorm(hidden_size) self.self_attention = MultiHeadAttention(hidden_size, attention_dropout_rate, num_heads) self.self_attention_dropout = nn.Dropout(dropout_rate) self.ffn_norm = nn.LayerNorm(hidden_size) self.ffn = FeedForwardNetwork(hidden_size, ffn_size, dropout_rate) self.ffn_dropout = nn.Dropout(dropout_rate) def forward(self, x, attn_bias=None): y = self.self_attention_norm(x) y = self.self_attention(y, y, y, attn_bias) y = self.self_attention_dropout(y) x = x + y y = self.ffn_norm(x) y = self.ffn(y) y = self.ffn_dropout(y) x = x + y return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4, 'ffn_size': 4, 'dropout_rate': 0.5, 'attention_dropout_rate': 0.5, 'num_heads': 4}]
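A minimal sketch of driving the eager EncoderLayer above, with shapes and constructor arguments taken from get_inputs()/get_init_inputs(); eval() is assumed so the 0.5-rate dropouts do not randomize the pass:

import torch

layer = EncoderLayer(hidden_size=4, ffn_size=4, dropout_rate=0.5,
                     attention_dropout_rate=0.5, num_heads=4)
layer.eval()              # disable both dropout layers for a deterministic pass
x = torch.rand(4, 4, 4)   # (batch, seq_len, hidden_size)
out = layer(x)            # attn_bias defaults to None
assert out.shape == x.shape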
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_mul_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = 
tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def 
triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_gelu_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_out_ptr0 + x2, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (4, 4), (4, 1)) assert_size_stride(primals_17, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((16, 
4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4) buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_mul_2[grid(16, 4)](buf3, primals_5, buf6, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf7 = reinterpret_tensor(buf3, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf3 triton_poi_fused_clone_3[grid(16, 4)](buf4, primals_7, buf7, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf8 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf7, (16, 1, 4), (4, 0, 1), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(256)](buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf8 triton_poi_fused__softmax_5[grid(256)](buf9, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf9 buf11 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf4 triton_poi_fused_clone_3[grid(16, 4)](buf5, primals_9, buf11, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_9 buf12 = reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0) del buf5 extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 0), 0), out=buf12) buf13 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_6[grid(16, 4)](buf12, buf13, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf14 = reinterpret_tensor(buf12, (16, 4), (4, 1), 0) del buf12 extern_kernels.addmm(primals_11, reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf14) del primals_11 buf15 = buf1 del buf1 buf16 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_3, buf14, buf15, buf16, 16, XBLOCK=16, num_warps=1, num_stages=1) buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_8[grid(64)](primals_3, buf14, buf15, buf16, primals_12, primals_13, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf15 del buf16 del primals_13 buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_15, reinterpret_tensor(buf17, (16, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf18) del primals_15 buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_gelu_9[grid(64)](buf18, buf19, 64, XBLOCK=64, num_warps=1, num_stages=1) buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf19, (16, 4), (4, 1), 0), reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), out=buf20) buf21 = reinterpret_tensor(buf20, (4, 4, 4), (16, 4, 1), 0) del buf20 triton_poi_fused_add_10[grid(64)](buf21, primals_3, buf14, primals_17, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_17 return buf21, primals_3, primals_12, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf10, reinterpret_tensor(buf13, (16, 4), (4, 1), 0 ), buf14, reinterpret_tensor(buf17, (16, 4), (4, 1), 0 ), buf18, reinterpret_tensor(buf19, (16, 4), (4, 1), 0 ), 
primals_16, primals_14, primals_10, reinterpret_tensor(buf11, ( 16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 4), 0 ), primals_8, primals_6, primals_4 class FeedForwardNetwork(nn.Module): def __init__(self, hidden_size, ffn_size, dropout_rate): super(FeedForwardNetwork, self).__init__() self.layer1 = nn.Linear(hidden_size, ffn_size) self.gelu = nn.GELU() self.layer2 = nn.Linear(ffn_size, hidden_size) def forward(self, x): x = self.layer1(x) x = self.gelu(x) x = self.layer2(x) return x class MultiHeadAttention(nn.Module): def __init__(self, hidden_size, attention_dropout_rate, num_heads): super(MultiHeadAttention, self).__init__() self.num_heads = num_heads self.att_size = att_size = hidden_size // num_heads self.scale = att_size ** -0.5 self.linear_q = nn.Linear(hidden_size, num_heads * att_size) self.linear_k = nn.Linear(hidden_size, num_heads * att_size) self.linear_v = nn.Linear(hidden_size, num_heads * att_size) self.att_dropout = nn.Dropout(attention_dropout_rate) self.output_layer = nn.Linear(num_heads * att_size, hidden_size) def forward(self, q, k, v, attn_bias=None): orig_q_size = q.size() d_k = self.att_size d_v = self.att_size batch_size = q.size(0) q = self.linear_q(q).view(batch_size, -1, self.num_heads, d_k) k = self.linear_k(k).view(batch_size, -1, self.num_heads, d_k) v = self.linear_v(v).view(batch_size, -1, self.num_heads, d_v) q = q.transpose(1, 2) v = v.transpose(1, 2) k = k.transpose(1, 2).transpose(2, 3) q = q * self.scale x = torch.matmul(q, k) if attn_bias is not None: x = x + attn_bias x = torch.softmax(x, dim=3) x = self.att_dropout(x) x = x.matmul(v) x = x.transpose(1, 2).contiguous() x = x.view(batch_size, -1, self.num_heads * d_v) x = self.output_layer(x) assert x.size() == orig_q_size return x class EncoderLayerNew(nn.Module): def __init__(self, hidden_size, ffn_size, dropout_rate, attention_dropout_rate, num_heads): super(EncoderLayerNew, self).__init__() self.self_attention_norm = nn.LayerNorm(hidden_size) self.self_attention = MultiHeadAttention(hidden_size, attention_dropout_rate, num_heads) self.self_attention_dropout = nn.Dropout(dropout_rate) self.ffn_norm = nn.LayerNorm(hidden_size) self.ffn = FeedForwardNetwork(hidden_size, ffn_size, dropout_rate) self.ffn_dropout = nn.Dropout(dropout_rate) def forward(self, input_0): primals_1 = self.self_attention_norm.weight primals_2 = self.self_attention_norm.bias primals_4 = self.self_attention.linear_q.weight primals_5 = self.self_attention.linear_q.bias primals_6 = self.self_attention.linear_k.weight primals_7 = self.self_attention.linear_k.bias primals_8 = self.self_attention.linear_v.weight primals_9 = self.self_attention.linear_v.bias primals_10 = self.self_attention.output_layer.weight primals_11 = self.self_attention.output_layer.bias primals_12 = self.ffn_norm.weight primals_13 = self.ffn_norm.bias primals_14 = self.ffn.layer1.weight primals_15 = self.ffn.layer1.bias primals_16 = self.ffn.layer2.weight primals_17 = self.ffn.layer2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0]
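# A minimal smoke test for the compiled EncoderLayerNew above (a sketch, not
# part of the generated output). It assumes a CUDA device, since call() pins
# every buffer to cuda:0, and reuses the tiny hyperparameters from
# get_init_inputs() in the reference code below. Dropout never enters the
# traced graph, so outputs should match the eager module only in eval() mode.
import torch

if torch.cuda.is_available():
    layer = EncoderLayerNew(hidden_size=4, ffn_size=4, dropout_rate=0.5,
                            attention_dropout_rate=0.5, num_heads=4).cuda()
    x = torch.rand(4, 4, 4, device='cuda')  # (batch, seq_len, hidden_size)
    with torch.no_grad():
        y = layer(x)
    assert y.shape == x.shape  # the residual encoder layer preserves shape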
Luo-Chang/Graphormer
EncoderLayer
false
5584
[ "MIT" ]
1
b35b3ca6369e25cdae80e1617bfc3921feeb3158
https://github.com/Luo-Chang/Graphormer/tree/b35b3ca6369e25cdae80e1617bfc3921feeb3158
import torch
import torch.nn as nn


class FeedForwardNetwork(nn.Module):

    def __init__(self, hidden_size, ffn_size, dropout_rate):
        super().__init__()
        self.layer1 = nn.Linear(hidden_size, ffn_size)
        self.gelu = nn.GELU()
        self.layer2 = nn.Linear(ffn_size, hidden_size)

    def forward(self, x):
        x = self.layer1(x)
        x = self.gelu(x)
        x = self.layer2(x)
        return x


class MultiHeadAttention(nn.Module):

    def __init__(self, hidden_size, attention_dropout_rate, num_heads):
        super().__init__()
        self.num_heads = num_heads
        self.att_size = att_size = hidden_size // num_heads
        self.scale = att_size ** -0.5
        self.linear_q = nn.Linear(hidden_size, num_heads * att_size)
        self.linear_k = nn.Linear(hidden_size, num_heads * att_size)
        self.linear_v = nn.Linear(hidden_size, num_heads * att_size)
        self.att_dropout = nn.Dropout(attention_dropout_rate)
        self.output_layer = nn.Linear(num_heads * att_size, hidden_size)

    def forward(self, q, k, v, attn_bias=None):
        orig_q_size = q.size()
        d_k = self.att_size
        d_v = self.att_size
        batch_size = q.size(0)
        q = self.linear_q(q).view(batch_size, -1, self.num_heads, d_k)
        k = self.linear_k(k).view(batch_size, -1, self.num_heads, d_k)
        v = self.linear_v(v).view(batch_size, -1, self.num_heads, d_v)
        q = q.transpose(1, 2)
        v = v.transpose(1, 2)
        k = k.transpose(1, 2).transpose(2, 3)
        q = q * self.scale
        x = torch.matmul(q, k)
        if attn_bias is not None:
            x = x + attn_bias
        x = torch.softmax(x, dim=3)
        x = self.att_dropout(x)
        x = x.matmul(v)
        x = x.transpose(1, 2).contiguous()
        x = x.view(batch_size, -1, self.num_heads * d_v)
        x = self.output_layer(x)
        assert x.size() == orig_q_size
        return x


class Model(nn.Module):

    def __init__(self, hidden_size, ffn_size, dropout_rate,
                 attention_dropout_rate, num_heads):
        super().__init__()
        self.self_attention_norm = nn.LayerNorm(hidden_size)
        self.self_attention = MultiHeadAttention(hidden_size,
                                                 attention_dropout_rate,
                                                 num_heads)
        self.self_attention_dropout = nn.Dropout(dropout_rate)
        self.ffn_norm = nn.LayerNorm(hidden_size)
        self.ffn = FeedForwardNetwork(hidden_size, ffn_size, dropout_rate)
        self.ffn_dropout = nn.Dropout(dropout_rate)

    def forward(self, x, attn_bias=None):
        y = self.self_attention_norm(x)
        y = self.self_attention(y, y, y, attn_bias)
        y = self.self_attention_dropout(y)
        x = x + y
        y = self.ffn_norm(x)
        y = self.ffn(y)
        y = self.ffn_dropout(y)
        x = x + y
        return x


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden_size': 4, 'ffn_size': 4, 'dropout_rate': 0.5,
                 'attention_dropout_rate': 0.5, 'num_heads': 4}]
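# Usage sketch for the eager reference Model above (an illustrative example,
# not from the source repo): the optional attn_bias is added to the raw
# attention scores q @ k^T before the softmax over dim=3, so it must
# broadcast against a (batch, num_heads, seq_len, seq_len) score tensor.
import torch

model = Model(hidden_size=4, ffn_size=4, dropout_rate=0.5,
              attention_dropout_rate=0.5, num_heads=4).eval()  # eval() disables both dropouts
x = torch.rand(4, 4, 4)              # (batch, seq_len, hidden_size)
attn_bias = torch.zeros(4, 4, 4, 4)  # (batch, heads, seq, seq) additive bias
with torch.no_grad():
    out = model(x, attn_bias)
assert out.shape == x.shape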
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/4d/c4d7os35bf4bckecmik4nlyqqsirmteh4sh3yxnab5lmuntnmwk2.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 128 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (4*x2) + (36*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_4/inductor_cache/j5/cj5nf2owtsdm2zwcezqxpyn63iwddjyadpotkhm2ua52inoqxdcl.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask) tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/wv/cwvtp6qflpb42kxrujmda5zselv7wvkz3fgp2tryo2ftsisaildr.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 2048 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = (yindex // 32) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (32*x2) + (288*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/nw/cnwm6ljuusoqjcwr2jdx6p2ue7ldghxjdr3oe62stiuqhsboiczy.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/tl/ctlxctn7eg6nwvpdhdhyqadp63cm2ogdwxsotfynexn2zw62nfbb.py # Topologically Sorted Source Nodes: [conv2d, a1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # a1 => relu # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = 
call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ku/ckuw5gg26ddjp4n4da74yttcx6jxcy2y4vb2npxdoq42pzni2oot.py # Topologically Sorted Source Nodes: [conv2d_1, a2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # a2 => relu_1 # conv2d_1 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', 
index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/m3/cm3haovccm7lav2s6wgp3wthu7in42r335z2o7yva4d7olh5begj.py # Topologically Sorted Source Nodes: [conv2d_2, a3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # a3 => relu_2 # conv2d_2 => convolution_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ir/cirx6nbkabstacj3yb3umtzb7ustxzn5ha5etdpsewqc2v53x42u.py # Topologically Sorted Source Nodes: [conv2d_3, p1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_3 => convolution_3 # p1 => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_7 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask) tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = 
triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + (16*y3)), tmp4, xmask & ymask) tl.store(out_ptr1 + (y0 + (4*x2) + (64*y1)), tmp6, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/2i/c2ijyqlcmcsnugr7aonjxredzbtnlbckjkwduhz675ye4ugjebkw.py # Topologically Sorted Source Nodes: [p_out], Original ATen: [aten._softmax] # Source node to ATen node mapping: # p_out => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm, [0], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_8 = async_compile.triton('triton_poi_fused__softmax_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_8(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tmp0 - tmp0 tmp2 = tl_math.exp(tmp1) tmp3 = tmp2 / tmp2 tl.store(in_out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/er/cerrhd6dfklfkghffr4w4v6k4tkknpp6pjf2fpylkzd3qma7oygl.py # Topologically Sorted Source Nodes: [conv2d_4, v1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_4 => convolution_4 # v1 => relu_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) # %le : [num_users=1] = 
call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_9 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 2 y1 = (yindex // 2) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (2*x2) + (32*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + (16*y3)), tmp4, xmask & ymask) tl.store(out_ptr1 + (y0 + (2*x2) + (32*y1)), tmp6, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ac/cacwmday2hjmsf4z7tj45ooyukpcmpul6vx42cwcasvjda6tvfr5.py # Topologically Sorted Source Nodes: [v_out], Original ATen: [aten.tanh] # Source node to ATen node mapping: # v_out => tanh # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_15), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_tanh_10 = async_compile.triton('triton_poi_fused_tanh_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.pointwise( size_hints=[1], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_out_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr0 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = libdevice.tanh(tmp4) tl.store(in_out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp5, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args args.clear() assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (4, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (64, 256), (256, 1)) assert_size_stride(primals_11, (64, ), (1, )) assert_size_stride(primals_12, (2, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_13, (2, ), (1, )) assert_size_stride(primals_14, (1, 128), (128, 1)) assert_size_stride(primals_15, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((32, 4, 3, 3), (36, 1, 12, 4), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 128, 9, grid=grid(128, 9), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_3, buf1, 16, 16, grid=grid(16, 16), stream=stream0) del primals_3 buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_4, buf2, 2048, 9, grid=grid(2048, 9), stream=stream0) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_6, buf3, 8192, 9, grid=grid(8192, 9), stream=stream0) del primals_6 # 
Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 4, 4), (512, 1, 128, 32)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d, a1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_4.run(buf5, primals_2, 2048, grid=grid(2048), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 4, 4), (1024, 1, 256, 64)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [conv2d_1, a2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_5.run(buf7, primals_5, 4096, grid=grid(4096), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 128, 4, 4), (2048, 1, 512, 128)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [conv2d_2, a3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_6.run(buf9, primals_7, 8192, grid=grid(8192), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4, 4), (64, 1, 16, 4)) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.bool) # Topologically Sorted Source Nodes: [conv2d_3, p1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_7.run(buf10, primals_9, buf11, buf19, 16, 16, grid=grid(16, 16), stream=stream0) del buf10 del primals_9 buf12 = empty_strided_cuda((1, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (1, 256), (0, 1), 0), reinterpret_tensor(primals_10, (256, 64), (1, 256), 0), alpha=1, beta=1, out=buf12) del primals_11 buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [p_out], Original ATen: [aten._softmax] triton_poi_fused__softmax_8.run(buf13, 64, grid=grid(64), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 2, 4, 4), (32, 1, 8, 2)) buf15 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) buf18 = empty_strided_cuda((4, 2, 4, 4), (32, 1, 8, 2), torch.bool) # Topologically Sorted Source Nodes: [conv2d_4, v1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_9.run(buf14, primals_13, buf15, buf18, 8, 16, grid=grid(8, 16), stream=stream0) del buf14 del primals_13 
buf16 = empty_strided_cuda((1, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf15, (1, 128), (0, 1), 0), reinterpret_tensor(primals_14, (128, 1), (1, 128), 0), out=buf16) buf17 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [v_out], Original ATen: [aten.tanh] triton_poi_fused_tanh_10.run(buf17, primals_15, 1, grid=grid(1), stream=stream0) del primals_15 return (buf13, buf17, buf0, buf1, buf2, buf3, primals_8, primals_12, buf5, buf7, buf9, reinterpret_tensor(buf11, (1, 256), (256, 1), 0), buf13, reinterpret_tensor(buf15, (1, 128), (128, 1), 0), buf17, primals_14, buf18, primals_10, buf19, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((64, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((2, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((1, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
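# Note on triton_poi_fused__softmax_8 above (an observation, not generated
# output): with the 4x4x4x4 input from get_inputs() below, p1 holds
# 4*4*4*4 = 256 elements, so p1.view(-1, 4 * 8 * 8) collapses the whole batch
# into a single (1, 256) row and the softmax over dim=0 runs across a
# dimension of size one. The kernel encodes that constant result directly:
# tmp0 - tmp0 == 0, exp(0) == 1, and 1 / 1 == 1. A quick eager check:
import torch
import torch.nn.functional as F

logits = torch.randn(1, 64)          # single row, as in the traced graph
probs = F.softmax(logits, dim=0)     # softmax over a size-1 dimension
assert torch.allclose(probs, torch.ones_like(probs))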
import torch
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(4, 32, 3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        self.p_fc1 = nn.Conv2d(128, 4, 1)
        self.p_fc2 = nn.Linear(4 * 8 * 8, 64)
        self.v_fc1 = nn.Conv2d(128, 2, 1)
        self.v_fc2 = nn.Linear(2 * 8 * 8, 1)

    def forward(self, x):
        a1 = F.relu(self.conv1(x))
        a2 = F.relu(self.conv2(a1))
        a3 = F.relu(self.conv3(a2))
        p1 = F.relu(self.p_fc1(a3))
        p_act = p1.view(-1, 4 * 8 * 8)
        p_out = F.softmax(self.p_fc2(p_act), dim=0)
        v1 = F.relu(self.v_fc1(a3))
        v_act = v1.view(-1, 2 * 8 * 8)
        v_out = torch.tanh(self.v_fc2(v_act))
        return p_out, v_out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
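# A hedged usage sketch for the eager Net above: the fully connected sizes
# assume an 8x8 board (4 * 8 * 8 = 256 policy features, 2 * 8 * 8 = 128 value
# features), while get_inputs() supplies 4x4 spatial tensors. With batch size
# 4 the element counts coincide (4*4*4*4 == 256), so view(-1, 4 * 8 * 8)
# silently merges the batch into one row and the network emits a single
# policy/value pair rather than one per sample.
import torch

net = Net().eval()
x = torch.rand(4, 4, 4, 4)           # (batch, channels, H, W) from get_inputs()
with torch.no_grad():
    p_out, v_out = net(x)
print(p_out.shape, v_out.shape)      # torch.Size([1, 64]) torch.Size([1, 1])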
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 128 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 36 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask) tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask) tl.store(out_ptr1 + (y0 + 4 * x2 + 64 * y1), tmp6, xmask & ymask) @triton.jit def triton_poi_fused__softmax_8(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tmp0 - tmp0 tmp2 = tl_math.exp(tmp1) tmp3 = tmp2 / tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): ynumel = 8 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 2 y1 = yindex // 2 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 2 * x2 + 32 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask) tl.store(out_ptr1 + (y0 + 2 * x2 + 32 * y1), tmp6, xmask & ymask) @triton.jit def triton_poi_fused_tanh_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_out_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr0 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = libdevice.tanh(tmp4) tl.store(in_out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp5, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (4, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (64, 256), (256, 1)) assert_size_stride(primals_11, (64,), (1,)) assert_size_stride(primals_12, (2, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_13, (2,), (1,)) assert_size_stride(primals_14, (1, 128), (128, 1)) assert_size_stride(primals_15, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((32, 4, 3, 3), (36, 1, 12, 4), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(128, 9)](primals_1, buf0, 128, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) triton_poi_fused_1[grid(16, 16)](primals_3, buf1, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch. 
float32) triton_poi_fused_2[grid(2048, 9)](primals_4, buf2, 2048, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 4, 4), (512, 1, 128, 32)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_4[grid(2048)](buf5, primals_2, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf6 = extern_kernels.convolution(buf5, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 4, 4), (1024, 1, 256, 64)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_5[grid(4096)](buf7, primals_5, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf8 = extern_kernels.convolution(buf7, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 128, 4, 4), (2048, 1, 512, 128)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_6[grid(8192)](buf9, primals_7, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4, 4), (64, 1, 16, 4)) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_7[grid(16, 16)]( buf10, primals_9, buf11, buf19, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del buf10 del primals_9 buf12 = empty_strided_cuda((1, 64), (64, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (1, 256), (0, 1), 0), reinterpret_tensor(primals_10, (256, 64), (1, 256), 0), alpha=1, beta=1, out=buf12) del primals_11 buf13 = buf12 del buf12 triton_poi_fused__softmax_8[grid(64)](buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) buf14 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 2, 4, 4), (32, 1, 8, 2)) buf15 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) buf18 = empty_strided_cuda((4, 2, 4, 4), (32, 1, 8, 2), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_9[grid(8, 16)]( buf14, primals_13, buf15, buf18, 8, 16, XBLOCK=16, YBLOCK=8, num_warps=4, num_stages=1) del buf14 del primals_13 buf16 = empty_strided_cuda((1, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf15, (1, 128), (0, 1), 0), reinterpret_tensor(primals_14, (128, 1), (1, 128), 0), out=buf16) buf17 = buf16 del buf16 triton_poi_fused_tanh_10[grid(1)](buf17, primals_15, 1, XBLOCK=1, num_warps=1, num_stages=1) del primals_15 return (buf13, buf17, buf0, buf1, buf2, buf3, primals_8, primals_12, buf5, buf7, buf9, reinterpret_tensor(buf11, (1, 256), (256, 1), 0), buf13, reinterpret_tensor(buf15, (1, 128), (128, 1), 0), buf17, primals_14, buf18, primals_10, buf19) class NetNew(nn.Module): def __init__(self): super(NetNew, self).__init__() self.conv1 = nn.Conv2d(4, 32, 3, padding=1) self.conv2 = 
nn.Conv2d(32, 64, 3, padding=1) self.conv3 = nn.Conv2d(64, 128, 3, padding=1) self.p_fc1 = nn.Conv2d(128, 4, 1) self.p_fc2 = nn.Linear(4 * 8 * 8, 64) self.v_fc1 = nn.Conv2d(128, 2, 1) self.v_fc2 = nn.Linear(2 * 8 * 8, 1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.p_fc1.weight primals_9 = self.p_fc1.bias primals_10 = self.p_fc2.weight primals_11 = self.p_fc2.bias primals_12 = self.v_fc1.weight primals_13 = self.v_fc1.bias primals_14 = self.v_fc2.weight primals_15 = self.v_fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0], output[1]
LouisCaixuran/gomoku
Net
false
5,585
[ "Apache-2.0" ]
1
c1b6d508522d9e8c78be827f326bbee54c4dfd8b
https://github.com/LouisCaixuran/gomoku/tree/c1b6d508522d9e8c78be827f326bbee54c4dfd8b
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(4, 32, 3, padding=1) self.conv2 = nn.Conv2d(32, 64, 3, padding=1) self.conv3 = nn.Conv2d(64, 128, 3, padding=1) self.p_fc1 = nn.Conv2d(128, 4, 1) self.p_fc2 = nn.Linear(4 * 8 * 8, 64) self.v_fc1 = nn.Conv2d(128, 2, 1) self.v_fc2 = nn.Linear(2 * 8 * 8, 1) def forward(self, x): a1 = F.relu(self.conv1(x)) a2 = F.relu(self.conv2(a1)) a3 = F.relu(self.conv3(a2)) p1 = F.relu(self.p_fc1(a3)) p_act = p1.view(-1, 4 * 8 * 8) p_out = F.softmax(self.p_fc2(p_act), dim=0) v1 = F.relu(self.v_fc1(a3)) v_act = v1.view(-1, 2 * 8 * 8) v_out = torch.tanh(self.v_fc2(v_act)) return p_out, v_out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
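A note on the fused softmax in the record above: with the 4x4x4x4 test input from get_inputs(), p1.view(-1, 4 * 8 * 8) flattens all 256 elements into a single row, so the softmax over dim=0 acts on a size-1 dimension and triton_poi_fused__softmax_8 legitimately degenerates to exp(x - x) / exp(x - x) == 1. A minimal eager-mode sketch (variable names here are illustrative, not from the repo) confirming the same behaviour:

import torch

# Softmax over a size-1 dim is identically 1, matching the fused kernel
# above, which computes exp(tmp0 - tmp0) / exp(tmp0 - tmp0) elementwise.
p_logits = torch.randn(1, 64)
p_out = torch.softmax(p_logits, dim=0)
assert torch.allclose(p_out, torch.ones_like(p_out))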
Binarizer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/eu/ceu5kmb7clcwhnin7efniq3cgipbjbiqe7glsxnqzjv4kv5aelk4.py # Topologically Sorted Source Nodes: [x_1, autograd_function_apply], Original ATen: [aten.tanh, aten.sign] # Source node to ATen node mapping: # autograd_function_apply => sign # x_1 => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%convolution,), kwargs = {}) # %sign : [num_users=1] = call_function[target=torch.ops.aten.sign.default](args = (%tanh,), kwargs = {}) triton_poi_fused_sign_tanh_0 = async_compile.triton('triton_poi_fused_sign_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sign_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sign_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 
libdevice.tanh(tmp0) tmp2 = tl.full([1], 0, tl.int32) tmp3 = tmp2 < tmp1 tmp4 = tmp3.to(tl.int8) tmp5 = tmp1 < tmp2 tmp6 = tmp5.to(tl.int8) tmp7 = tmp4 - tmp6 tmp8 = tmp7.to(tmp1.dtype) tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1, autograd_function_apply], Original ATen: [aten.tanh, aten.sign] stream0 = get_raw_stream(0) triton_poi_fused_sign_tanh_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) return (buf1, primals_1, primals_2, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.autograd import Function import torch import torch.nn as nn import torch.nn.functional as F class SignFunction(Function): def __init__(self): super(SignFunction, self).__init__() @staticmethod def forward(ctx, input, is_training=True): if is_training: prob = input.new(input.size()).uniform_() x = input.clone() x[(1 - input) / 2 <= prob] = 1 x[(1 - input) / 2 > prob] = -1 return x else: return input.sign() @staticmethod def backward(ctx, grad_output): return grad_output, None class Sign(nn.Module): def __init__(self): super(Sign, self).__init__() def forward(self, x): return SignFunction.apply(x, self.training) class Binarizer(nn.Module): def __init__(self, in_channels, out_channels): super(Binarizer, self).__init__() self.sign = Sign() self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False) def forward(self, x): x = self.conv1(x) x = F.tanh(x) return self.sign(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.autograd import Function import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sign_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp2 = tl.full([1], 0, tl.int32) tmp3 = tmp2 < tmp1 tmp4 = tmp3.to(tl.int8) tmp5 = tmp1 < tmp2 tmp6 = tmp5.to(tl.int8) tmp7 = tmp4 - tmp6 tmp8 = tmp7.to(tmp1.dtype) tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sign_tanh_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf1, primals_1, primals_2, buf0 class SignFunction(Function): def __init__(self): super(SignFunction, self).__init__() @staticmethod def forward(ctx, input, is_training=True): if is_training: prob = input.new(input.size()).uniform_() x = input.clone() x[(1 - input) / 2 <= prob] = 1 x[(1 - input) / 2 > prob] = -1 return x else: return input.sign() @staticmethod def backward(ctx, grad_output): return grad_output, None class Sign(nn.Module): def __init__(self): super(Sign, self).__init__() def forward(self, x): return SignFunction.apply(x, self.training) class BinarizerNew(nn.Module): def __init__(self, in_channels, out_channels): super(BinarizerNew, self).__init__() self.sign = Sign() self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
MeMihir/SuperResCompression
Binarizer
false
5,586
[ "MIT" ]
1
c76bcf6b12d56ce3ad81ebb1b204fc0425f0e633
https://github.com/MeMihir/SuperResCompression/tree/c76bcf6b12d56ce3ad81ebb1b204fc0425f0e633
from torch.autograd import Function import torch import torch.nn as nn import torch.nn.functional as F class SignFunction(Function): def __init__(self): super().__init__() @staticmethod def forward(ctx, input, is_training=True): if is_training: prob = input.new(input.size()).uniform_() x = input.clone() x[(1 - input) / 2 <= prob] = 1 x[(1 - input) / 2 > prob] = -1 return x else: return input.sign() @staticmethod def backward(ctx, grad_output): return grad_output, None class Sign(nn.Module): def __init__(self): super().__init__() def forward(self, x): return SignFunction.apply(x, self.training) class Model(nn.Module): def __init__(self, in_channels, out_channels): super().__init__() self.sign = Sign() self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False) def forward(self, x): x = self.conv1(x) x = F.tanh(x) return self.sign(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
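The SignFunction in this Binarizer record is a straight-through estimator: the training-time forward samples +/-1 with P(+1) = (1 + input) / 2 (valid because tanh keeps input in [-1, 1]), and backward returns grad_output unchanged so gradients skip the non-differentiable quantization. A minimal deterministic sketch of that pattern (the class name here is illustrative, not from the repo):

import torch
from torch.autograd import Function


class _STESign(Function):
    # Straight-through sign: piecewise-constant forward, identity backward --
    # the same estimator SignFunction implements above.
    @staticmethod
    def forward(ctx, x):
        return x.sign()

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output


x = torch.randn(5, requires_grad=True)
_STESign.apply(x).sum().backward()
# The true derivative of sign() is zero almost everywhere, yet the
# straight-through backward delivers a usable all-ones gradient here.
assert torch.equal(x.grad, torch.ones_like(x))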
Sign
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/zy/czyscnnxj3g3lyyjhg3q4airae4625yoe5ygsjf2qxbxvi3gpiqn.py # Topologically Sorted Source Nodes: [sign], Original ATen: [aten.sign] # Source node to ATen node mapping: # sign => sign # Graph fragment: # %sign : [num_users=1] = call_function[target=torch.ops.aten.sign.default](args = (%arg0_1,), kwargs = {}) triton_poi_fused_sign_0 = async_compile.triton('triton_poi_fused_sign_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sign_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sign_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = tmp1 < tmp0 tmp3 = tmp2.to(tl.int8) tmp4 = tmp0 < tmp1 tmp5 = tmp4.to(tl.int8) tmp6 = tmp3 - tmp5 tmp7 = tmp6.to(tmp0.dtype) tl.store(out_ptr0 + (x0), tmp7, xmask) ''', 
device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sign], Original ATen: [aten.sign] stream0 = get_raw_stream(0) triton_poi_fused_sign_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.autograd import Function import torch import torch.nn as nn class SignFunction(Function): def __init__(self): super(SignFunction, self).__init__() @staticmethod def forward(ctx, input, is_training=True): if is_training: prob = input.new(input.size()).uniform_() x = input.clone() x[(1 - input) / 2 <= prob] = 1 x[(1 - input) / 2 > prob] = -1 return x else: return input.sign() @staticmethod def backward(ctx, grad_output): return grad_output, None class Sign(nn.Module): def __init__(self): super(Sign, self).__init__() def forward(self, x): return SignFunction.apply(x, self.training) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.autograd import Function import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sign_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = tmp1 < tmp0 tmp3 = tmp2.to(tl.int8) tmp4 = tmp0 < tmp1 tmp5 = tmp4.to(tl.int8) tmp6 = tmp3 - tmp5 tmp7 = tmp6.to(tmp0.dtype) tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sign_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class SignFunction(Function): def __init__(self): super(SignFunction, self).__init__() @staticmethod def forward(ctx, input, is_training=True): if is_training: prob = input.new(input.size()).uniform_() x = input.clone() x[(1 - input) / 2 <= prob] = 1 x[(1 - input) / 2 > prob] = -1 return x else: return input.sign() @staticmethod def backward(ctx, grad_output): return grad_output, None class SignNew(nn.Module): def __init__(self): super(SignNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
MeMihir/SuperResCompression
Sign
false
5,587
[ "MIT" ]
1
c76bcf6b12d56ce3ad81ebb1b204fc0425f0e633
https://github.com/MeMihir/SuperResCompression/tree/c76bcf6b12d56ce3ad81ebb1b204fc0425f0e633
from torch.autograd import Function import torch import torch.nn as nn class SignFunction(Function): def __init__(self): super().__init__() @staticmethod def forward(ctx, input, is_training=True): if is_training: prob = input.new(input.size()).uniform_() x = input.clone() x[(1 - input) / 2 <= prob] = 1 x[(1 - input) / 2 > prob] = -1 return x else: return input.sign() @staticmethod def backward(ctx, grad_output): return grad_output, None class Model(nn.Module): def __init__(self): super().__init__() def forward(self, x): return SignFunction.apply(x, self.training) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
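The compiled kernel in this Sign record computes sign() without branching: it casts the comparisons (0 < x) and (x < 0) to int8, subtracts them, and casts the result back to the input dtype. A quick eager-mode check (plain tensors standing in for the Triton block) that this reproduces torch.sign, including sign(0) == 0:

import torch

# (0 < x) - (x < 0) is +1 for positives, -1 for negatives, and 0 at zero --
# the exact tmp2..tmp7 sequence in triton_poi_fused_sign_0 above.
x = torch.tensor([-2.0, 0.0, 1e-08, 3.5])
branchless = ((0 < x).to(torch.int8) - (x < 0).to(torch.int8)).to(x.dtype)
assert torch.equal(branchless, torch.sign(x))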
GLU
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/j6/cj63ypsp5wd4xpbcgdrjj2sjbi74adsw4ajccnbd2ift6xmplwm2.py # Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul] # Source node to ATen node mapping: # mul => mul # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) 
tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_sigmoid_0.run(buf0, buf1, buf2, 256, grid=grid(256), stream=stream0) return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class GLU(nn.Module): def __init__(self, input_channel, output_channel): super(GLU, self).__init__() self.linear_left = nn.Linear(input_channel, output_channel) self.linear_right = nn.Linear(input_channel, output_channel) def forward(self, x): return torch.mul(self.linear_left(x), torch.sigmoid(self. linear_right(x))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_channel': 4, 'output_channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(256)](buf0, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf1 class GLUNew(nn.Module): def __init__(self, input_channel, output_channel): super(GLUNew, self).__init__() self.linear_left = nn.Linear(input_channel, output_channel) self.linear_right = nn.Linear(input_channel, output_channel) def forward(self, input_0): primals_1 = self.linear_left.weight primals_2 = self.linear_left.bias primals_4 = self.linear_right.weight primals_5 = self.linear_right.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
MichaelHopwood/GLRM
GLU
false
5,588
[ "MIT" ]
1
80930762e6964afb8ef0db9e5ae3a10cfcc975b2
https://github.com/MichaelHopwood/GLRM/tree/80930762e6964afb8ef0db9e5ae3a10cfcc975b2
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, input_channel, output_channel): super().__init__() self.linear_left = nn.Linear(input_channel, output_channel) self.linear_right = nn.Linear(input_channel, output_channel) def forward(self, x): return torch.mul(self.linear_left(x), torch.sigmoid(self. linear_right(x))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
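This GLU record implements the classic gating identity: with a = linear_left(x) and g = linear_right(x), the output a * sigmoid(g) is what F.glu computes on the concatenation [a, g], and triton_poi_fused_mul_sigmoid_0 is exactly that elementwise sigmoid-multiply applied after the two addmm projections. A small sketch (weights randomly initialized, purely illustrative):

import torch
import torch.nn.functional as F

# GLU via two separate projections, as in the record, versus F.glu on the
# concatenated halves: both reduce to a * sigmoid(g).
x = torch.randn(4, 4, 4, 4)
left, right = torch.nn.Linear(4, 4), torch.nn.Linear(4, 4)
a, g = left(x), right(x)
assert torch.allclose(torch.mul(a, torch.sigmoid(g)),
                      F.glu(torch.cat([a, g], dim=-1), dim=-1))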
AverageAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/75/c75w3rgnfmm4c7hp5div65urlkb5kzh2656pt75swmio7vzn3vp3.py # Topologically Sorted Source Nodes: [ones, triangle, mask], Original ATen: [aten.ones, aten.tril, aten.mul] # Source node to ATen node mapping: # mask => mul_1 # ones => full_default # triangle => full_default_1, le, sub, where # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%sub, 0), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%le, %full_default, %full_default_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, %permute), kwargs = {}) triton_poi_fused_mul_ones_tril_0 = async_compile.triton('triton_poi_fused_mul_ones_tril_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_ones_tril_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_ones_tril_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = x0 + ((-1)*x1) tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 <= tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = 1 + x1 tmp7 = tmp6.to(tl.float32) tmp8 = tmp3 / tmp7 tmp9 = tmp5 * tmp8 tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ha/chavpwdtejkyqus2olvrr56v6fhdolpm5dx6l26ahmwfvz664fnv.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %bmm], -1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/bj/cbjkk5x2yiy67l3q4l7ooe5u7plvwkualpweocfe25rsydr62zek.py # Topologically Sorted Source Nodes: [sigmoid, mul_1, sigmoid_1, mul_2, gating_outputs_1], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.sigmoid_backward] # Source node to ATen node mapping: # gating_outputs_1 => add_1 # mul_1 => mul_2 # mul_2 => mul_3 # sigmoid => sigmoid # sigmoid_1 => sigmoid_1 # Graph fragment: # %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_1), kwargs = {}) # %sigmoid_1 : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_1,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %bmm), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_1), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %sub_1), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sub_2), kwargs = {}) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_2 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_sigmoid_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_sigmoid_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (8*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp4 = 
tl.load(in_ptr2 + (x2), xmask) tmp6 = tl.load(in_ptr0 + (4 + x0 + (8*x1)), xmask) tmp7 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp5 = tmp3 * tmp4 tmp8 = tmp6 + tmp7 tmp9 = tl.sigmoid(tmp8) tmp11 = tmp9 * tmp10 tmp12 = tmp5 + tmp11 tmp13 = 1.0 tmp14 = tmp13 - tmp9 tmp15 = tmp9 * tmp14 tmp16 = tmp13 - tmp3 tmp17 = tmp3 * tmp16 tl.store(out_ptr0 + (x2), tmp12, xmask) tl.store(out_ptr1 + (x2), tmp15, xmask) tl.store(out_ptr2 + (x2), tmp17, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (8, 8), (8, 1)) assert_size_stride(primals_3, (8, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [ones, triangle, mask], Original ATen: [aten.ones, aten.tril, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_ones_tril_0.run(buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [average_outputs], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (0, 4, 1), 0), primals_1, out=buf1) del buf0 buf2 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(primals_1, buf1, buf2, 128, grid=grid(128), stream=stream0) buf3 = empty_strided_cuda((16, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (16, 8), (8, 1), 0), reinterpret_tensor(primals_2, (8, 8), (1, 8), 0), out=buf3) del primals_2 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, mul_1, sigmoid_1, mul_2, gating_outputs_1], Original ATen: [aten.sigmoid, aten.mul, aten.add, aten.sigmoid_backward] triton_poi_fused_add_mul_sigmoid_sigmoid_backward_2.run(buf3, primals_3, primals_1, buf1, buf4, buf5, buf6, 64, grid=grid(64), stream=stream0) del buf3 del primals_3 return (buf4, buf1, primals_1, buf1, reinterpret_tensor(buf2, (16, 8), (8, 1), 0), buf5, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.cuda import torch.distributed class ActivationFunction(object): relu = 'relu' gelu = 'gelu' class PositionwiseFeedForward(nn.Module): """ A two-layer Feed-Forward-Network with residual layer norm. Args: d_model (int): the size of input for the first-layer of the FFN. d_ff (int): the hidden layer size of the second-layer of the FNN. dropout (float): dropout probability in :math:`[0, 1)`. activation_fn (ActivationFunction): activation function used. """ def __init__(self, d_model, d_ff, dropout=0.1, activation_fn= ActivationFunction.relu): super(PositionwiseFeedForward, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) self.dropout_1 = nn.Dropout(dropout) self.activation = ACTIVATION_FUNCTIONS[activation_fn] self.dropout_2 = nn.Dropout(dropout) def forward(self, x): """Layer definition. Args: x: ``(batch_size, input_len, model_dim)`` Returns: (FloatTensor): Output ``(batch_size, input_len, model_dim)``. """ inter = self.dropout_1(self.activation(self.w_1(self.layer_norm(x)))) output = self.dropout_2(self.w_2(inter)) return output + x def update_dropout(self, dropout): self.dropout_1.p = dropout self.dropout_2.p = dropout class AverageAttention(nn.Module): """ Average Attention module from "Accelerating Neural Transformer via an Average Attention Network" :cite:`DBLP:journals/corr/abs-1805-00631`. Args: model_dim (int): the dimension of keys/values/queries, must be divisible by head_count dropout (float): dropout parameter pos_ffn_activation_fn (ActivationFunction): activation function choice for PositionwiseFeedForward layer """ def __init__(self, model_dim, dropout=0.1, aan_useffn=False, pos_ffn_activation_fn=ActivationFunction.relu): self.model_dim = model_dim self.aan_useffn = aan_useffn super(AverageAttention, self).__init__() if aan_useffn: self.average_layer = PositionwiseFeedForward(model_dim, model_dim, dropout, pos_ffn_activation_fn) self.gating_layer = nn.Linear(model_dim * 2, model_dim * 2) def cumulative_average_mask(self, batch_size, inputs_len, device): """ Builds the mask to compute the cumulative average as described in :cite:`DBLP:journals/corr/abs-1805-00631` -- Figure 3 Args: batch_size (int): batch size inputs_len (int): length of the inputs Returns: (FloatTensor): * A Tensor of shape ``(batch_size, input_len, input_len)`` """ triangle = torch.tril(torch.ones(inputs_len, inputs_len, dtype= torch.float, device=device)) weights = torch.ones(1, inputs_len, dtype=torch.float, device=device ) / torch.arange(1, inputs_len + 1, dtype=torch.float, device= device) mask = triangle * weights.transpose(0, 1) return mask.unsqueeze(0).expand(batch_size, inputs_len, inputs_len) def cumulative_average(self, inputs, mask_or_step, layer_cache=None, step=None): """ Computes the cumulative average as described in :cite:`DBLP:journals/corr/abs-1805-00631` -- Equations (1) (5) (6) Args: inputs (FloatTensor): sequence to average ``(batch_size, input_len, dimension)`` mask_or_step: if cache is set, this is assumed to be the current step of the dynamic decoding. Otherwise, it is the mask matrix used to compute the cumulative average. layer_cache: a dictionary containing the cumulative average of the previous step. Returns: a tensor of the same shape and type as ``inputs``. 
""" if layer_cache is not None: step = mask_or_step average_attention = (inputs + step * layer_cache['prev_g']) / (step + 1) layer_cache['prev_g'] = average_attention return average_attention else: mask = mask_or_step return torch.matmul(mask, inputs) def forward(self, inputs, mask=None, layer_cache=None, step=None): """ Args: inputs (FloatTensor): ``(batch_size, input_len, model_dim)`` Returns: (FloatTensor, FloatTensor): * gating_outputs ``(batch_size, input_len, model_dim)`` * average_outputs average attention ``(batch_size, input_len, model_dim)`` """ batch_size = inputs.size(0) inputs_len = inputs.size(1) average_outputs = self.cumulative_average(inputs, self. cumulative_average_mask(batch_size, inputs_len, inputs.device) if layer_cache is None else step, layer_cache=layer_cache) if self.aan_useffn: average_outputs = self.average_layer(average_outputs) gating_outputs = self.gating_layer(torch.cat((inputs, average_outputs), -1)) input_gate, forget_gate = torch.chunk(gating_outputs, 2, dim=2) gating_outputs = torch.sigmoid(input_gate) * inputs + torch.sigmoid( forget_gate) * average_outputs return gating_outputs, average_outputs def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'model_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.cuda import torch.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_ones_tril_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = x0 + -1 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 <= tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = 1 + x1 tmp7 = tmp6.to(tl.float32) tmp8 = tmp3 / tmp7 tmp9 = tmp5 * tmp8 tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x2, xmask) tmp6 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask) tmp7 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x2, xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp5 = tmp3 * tmp4 tmp8 = tmp6 + tmp7 tmp9 = tl.sigmoid(tmp8) tmp11 = tmp9 * tmp10 tmp12 = tmp5 + tmp11 tmp13 = 1.0 tmp14 = tmp13 - tmp9 tmp15 = tmp9 * tmp14 tmp16 = tmp13 - tmp3 tmp17 = tmp3 * tmp16 tl.store(out_ptr0 + x2, tmp12, xmask) tl.store(out_ptr1 + x2, tmp15, xmask) tl.store(out_ptr2 + x2, tmp17, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (8, 8), (8, 1)) assert_size_stride(primals_3, (8,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_ones_tril_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (0, 4, 1), 0 ), primals_1, out=buf1) del buf0 buf2 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) triton_poi_fused_cat_1[grid(128)](primals_1, buf1, buf2, 128, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((16, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 8), (8, 1), 0), reinterpret_tensor(primals_2, (8, 8), (1, 8), 0), out=buf3) del primals_2 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_2[grid(64)](buf3, primals_3, primals_1, buf1, buf4, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf3 del primals_3 return buf4, buf1, primals_1, buf1, reinterpret_tensor(buf2, (16, 8), ( 8, 1), 0), buf5, buf6 class ActivationFunction(object): relu = 'relu' gelu = 'gelu' class PositionwiseFeedForward(nn.Module): """ A two-layer Feed-Forward-Network with residual layer norm. Args: d_model (int): the size of input for the first-layer of the FFN. d_ff (int): the hidden layer size of the second-layer of the FNN. dropout (float): dropout probability in :math:`[0, 1)`. activation_fn (ActivationFunction): activation function used. """ def __init__(self, d_model, d_ff, dropout=0.1, activation_fn= ActivationFunction.relu): super(PositionwiseFeedForward, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) self.dropout_1 = nn.Dropout(dropout) self.activation = ACTIVATION_FUNCTIONS[activation_fn] self.dropout_2 = nn.Dropout(dropout) def forward(self, x): """Layer definition. Args: x: ``(batch_size, input_len, model_dim)`` Returns: (FloatTensor): Output ``(batch_size, input_len, model_dim)``. 
""" inter = self.dropout_1(self.activation(self.w_1(self.layer_norm(x)))) output = self.dropout_2(self.w_2(inter)) return output + x def update_dropout(self, dropout): self.dropout_1.p = dropout self.dropout_2.p = dropout class AverageAttentionNew(nn.Module): """ Average Attention module from "Accelerating Neural Transformer via an Average Attention Network" :cite:`DBLP:journals/corr/abs-1805-00631`. Args: model_dim (int): the dimension of keys/values/queries, must be divisible by head_count dropout (float): dropout parameter pos_ffn_activation_fn (ActivationFunction): activation function choice for PositionwiseFeedForward layer """ def __init__(self, model_dim, dropout=0.1, aan_useffn=False, pos_ffn_activation_fn=ActivationFunction.relu): self.model_dim = model_dim self.aan_useffn = aan_useffn super(AverageAttentionNew, self).__init__() if aan_useffn: self.average_layer = PositionwiseFeedForward(model_dim, model_dim, dropout, pos_ffn_activation_fn) self.gating_layer = nn.Linear(model_dim * 2, model_dim * 2) def cumulative_average_mask(self, batch_size, inputs_len, device): """ Builds the mask to compute the cumulative average as described in :cite:`DBLP:journals/corr/abs-1805-00631` -- Figure 3 Args: batch_size (int): batch size inputs_len (int): length of the inputs Returns: (FloatTensor): * A Tensor of shape ``(batch_size, input_len, input_len)`` """ triangle = torch.tril(torch.ones(inputs_len, inputs_len, dtype= torch.float, device=device)) weights = torch.ones(1, inputs_len, dtype=torch.float, device=device ) / torch.arange(1, inputs_len + 1, dtype=torch.float, device= device) mask = triangle * weights.transpose(0, 1) return mask.unsqueeze(0).expand(batch_size, inputs_len, inputs_len) def cumulative_average(self, inputs, mask_or_step, layer_cache=None, step=None): """ Computes the cumulative average as described in :cite:`DBLP:journals/corr/abs-1805-00631` -- Equations (1) (5) (6) Args: inputs (FloatTensor): sequence to average ``(batch_size, input_len, dimension)`` mask_or_step: if cache is set, this is assumed to be the current step of the dynamic decoding. Otherwise, it is the mask matrix used to compute the cumulative average. layer_cache: a dictionary containing the cumulative average of the previous step. Returns: a tensor of the same shape and type as ``inputs``. """ if layer_cache is not None: step = mask_or_step average_attention = (inputs + step * layer_cache['prev_g']) / (step + 1) layer_cache['prev_g'] = average_attention return average_attention else: mask = mask_or_step return torch.matmul(mask, inputs) def forward(self, input_0): primals_2 = self.gating_layer.weight primals_3 = self.gating_layer.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
MaxatTezekbayev/OpenNMT-py-lexical
AverageAttention
false
5589
[ "MIT" ]
1
44182999b863fc4074d67e0281c5bdab19abddfe
https://github.com/MaxatTezekbayev/OpenNMT-py-lexical/tree/44182999b863fc4074d67e0281c5bdab19abddfe
import torch import torch.nn as nn import torch.cuda import torch.distributed class ActivationFunction(object): relu = 'relu' gelu = 'gelu' class PositionwiseFeedForward(nn.Module): """ A two-layer Feed-Forward-Network with residual layer norm. Args: d_model (int): the size of input for the first-layer of the FFN. d_ff (int): the hidden layer size of the second-layer of the FNN. dropout (float): dropout probability in :math:`[0, 1)`. activation_fn (ActivationFunction): activation function used. """ def __init__(self, d_model, d_ff, dropout=0.1, activation_fn= ActivationFunction.relu): super().__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) self.layer_norm = nn.LayerNorm(d_model, eps=1e-06) self.dropout_1 = nn.Dropout(dropout) self.activation = ACTIVATION_FUNCTIONS[activation_fn] self.dropout_2 = nn.Dropout(dropout) def forward(self, x): """Layer definition. Args: x: ``(batch_size, input_len, model_dim)`` Returns: (FloatTensor): Output ``(batch_size, input_len, model_dim)``. """ inter = self.dropout_1(self.activation(self.w_1(self.layer_norm(x)))) output = self.dropout_2(self.w_2(inter)) return output + x def update_dropout(self, dropout): self.dropout_1.p = dropout self.dropout_2.p = dropout class Model(nn.Module): """ Average Attention module from "Accelerating Neural Transformer via an Average Attention Network" :cite:`DBLP:journals/corr/abs-1805-00631`. Args: model_dim (int): the dimension of keys/values/queries, must be divisible by head_count dropout (float): dropout parameter pos_ffn_activation_fn (ActivationFunction): activation function choice for PositionwiseFeedForward layer """ def __init__(self, model_dim, dropout=0.1, aan_useffn=False, pos_ffn_activation_fn=ActivationFunction.relu): self.model_dim = model_dim self.aan_useffn = aan_useffn super().__init__() if aan_useffn: self.average_layer = PositionwiseFeedForward(model_dim, model_dim, dropout, pos_ffn_activation_fn) self.gating_layer = nn.Linear(model_dim * 2, model_dim * 2) def cumulative_average_mask(self, batch_size, inputs_len, device): """ Builds the mask to compute the cumulative average as described in :cite:`DBLP:journals/corr/abs-1805-00631` -- Figure 3 Args: batch_size (int): batch size inputs_len (int): length of the inputs Returns: (FloatTensor): * A Tensor of shape ``(batch_size, input_len, input_len)`` """ triangle = torch.tril(torch.ones(inputs_len, inputs_len, dtype= torch.float, device=device)) weights = torch.ones(1, inputs_len, dtype=torch.float, device=device ) / torch.arange(1, inputs_len + 1, dtype=torch.float, device= device) mask = triangle * weights.transpose(0, 1) return mask.unsqueeze(0).expand(batch_size, inputs_len, inputs_len) def cumulative_average(self, inputs, mask_or_step, layer_cache=None, step=None): """ Computes the cumulative average as described in :cite:`DBLP:journals/corr/abs-1805-00631` -- Equations (1) (5) (6) Args: inputs (FloatTensor): sequence to average ``(batch_size, input_len, dimension)`` mask_or_step: if cache is set, this is assumed to be the current step of the dynamic decoding. Otherwise, it is the mask matrix used to compute the cumulative average. layer_cache: a dictionary containing the cumulative average of the previous step. Returns: # ... truncated (>4000 chars) for memory efficiency
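The cumulative_average_mask above builds a lower-triangular matrix whose row i carries weight 1/(i+1) on positions 0..i, so a single matmul with the mask yields the running mean of the sequence. A minimal sketch verifying this against torch.cumsum (shapes are illustrative, not taken from the dataset entry):

import torch

inputs_len = 4
triangle = torch.tril(torch.ones(inputs_len, inputs_len))
weights = torch.ones(1, inputs_len) / torch.arange(1, inputs_len + 1, dtype=torch.float)
mask = triangle * weights.transpose(0, 1)      # row i holds 1/(i+1) at columns 0..i

x = torch.randn(2, inputs_len, 8)              # (batch, len, dim)
avg = torch.matmul(mask, x)                    # mask broadcasts over the batch dim
ref = torch.cumsum(x, dim=1) / torch.arange(1, inputs_len + 1, dtype=torch.float).view(1, -1, 1)
assert torch.allclose(avg, ref, atol=1e-6)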
SRNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ej/cejfrwnzxinkchwn6symdb72fdtj7gix5hy2vuswodhbeh45mrae.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/of/coferfgb6zeb7ewd25keilubhznpkefhirehs3f5p2bmxsjdmmrt.py # Topologically Sorted Source Nodes: [x_7, SR_img], Original ATen: [aten.convolution, aten.add] # Source node to ATen node mapping: # SR_img => add # x_7 => convolution_7 # Graph fragment: # %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_6, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %convolution_7), kwargs = {}) triton_poi_fused_add_convolution_1 = async_compile.triton('triton_poi_fused_add_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 49152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 3 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_out_ptr0 + (x3), None) tmp2 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) 
assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64, ), (1, )) assert_size_stride(primals_10, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (64, ), (1, )) assert_size_stride(primals_12, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_13, (64, ), (1, )) assert_size_stride(primals_14, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_15, (64, ), (1, )) assert_size_stride(primals_16, (3, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_17, (3, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 1048576, grid=grid(1048576), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 1048576, grid=grid(1048576), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_0.run(buf5, primals_7, 1048576, grid=grid(1048576), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_0.run(buf7, primals_9, 1048576, grid=grid(1048576), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [conv2d_4, x_4], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_0.run(buf9, primals_11, 1048576, grid=grid(1048576), stream=stream0) del primals_11 # 
Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [conv2d_5, x_5], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_0.run(buf11, primals_13, 1048576, grid=grid(1048576), stream=stream0) del primals_13 # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [conv2d_6, x_6], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_0.run(buf13, primals_15, 1048576, grid=grid(1048576), stream=stream0) del primals_15 # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf13, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf15 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [x_7, SR_img], Original ATen: [aten.convolution, aten.add] triton_poi_fused_add_convolution_1.run(buf15, primals_3, primals_17, 49152, grid=grid(49152), stream=stream0) del primals_17 return (buf15, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, buf1, buf3, buf5, buf7, buf9, buf11, buf13, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((3, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, 
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.optim class SRNet(nn.Module): def __init__(self): super(SRNet, self).__init__() self.relu = nn.ReLU(inplace=True) self.Conv1 = nn.Conv2d(3, 64, 3, 1, 1, bias=True) self.Conv2 = nn.Conv2d(64, 64, 3, 1, 1, bias=True) self.Conv3 = nn.Conv2d(64, 64, 3, 1, 1, bias=True) self.Conv4 = nn.Conv2d(64, 64, 3, 1, 1, bias=True) self.Conv5 = nn.Conv2d(64, 64, 3, 1, 1, bias=True) self.Conv6 = nn.Conv2d(64, 64, 3, 1, 1, bias=True) self.Conv7 = nn.Conv2d(64, 64, 3, 1, 1, bias=True) self.Conv8 = nn.Conv2d(64, 3, 3, 1, 1, bias=True) def forward(self, LR_img): x = self.relu(self.Conv1(LR_img)) x = self.relu(self.Conv2(x)) x = self.relu(self.Conv3(x)) x = self.relu(self.Conv4(x)) x = self.relu(self.Conv5(x)) x = self.relu(self.Conv6(x)) x = self.relu(self.Conv7(x)) x = self.Conv8(x) SR_img = LR_img + x return SR_img def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
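SRNet is a plain residual CNN: seven conv+ReLU stages followed by a 3-channel projection whose output is added back onto the input image, so the network only has to learn the high-frequency residual. A minimal usage sketch (CPU suffices for the eager model):

import torch

model = SRNet()
lr_img = torch.rand(4, 3, 64, 64)
sr_img = model(lr_img)
assert sr_img.shape == lr_img.shape    # the residual head preserves resolution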
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 3 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_out_ptr0 + x3, None) tmp2 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (64,), (1,)) assert_size_stride(primals_12, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_13, (64,), (1,)) assert_size_stride(primals_14, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_15, (64,), (1,)) assert_size_stride(primals_16, (3, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_17, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(1048576)](buf3, primals_5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 64, 64), 
(262144, 4096, 64, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_0[grid(1048576)](buf5, primals_7, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_0[grid(1048576)](buf7, primals_9, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_0[grid(1048576)](buf9, primals_11, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf11 = buf10 del buf10 triton_poi_fused_convolution_relu_0[grid(1048576)](buf11, primals_13, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_0[grid(1048576)](buf13, primals_15, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf14 = extern_kernels.convolution(buf13, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf15 = buf14 del buf14 triton_poi_fused_add_convolution_1[grid(49152)](buf15, primals_3, primals_17, 49152, XBLOCK=256, num_warps=4, num_stages=1) del primals_17 return (buf15, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, buf1, buf3, buf5, buf7, buf9, buf11, buf13) class SRNetNew(nn.Module): def __init__(self): super(SRNetNew, self).__init__() self.relu = nn.ReLU(inplace=True) self.Conv1 = nn.Conv2d(3, 64, 3, 1, 1, bias=True) self.Conv2 = nn.Conv2d(64, 64, 3, 1, 1, bias=True) self.Conv3 = nn.Conv2d(64, 64, 3, 1, 1, bias=True) self.Conv4 = nn.Conv2d(64, 64, 3, 1, 1, bias=True) self.Conv5 = nn.Conv2d(64, 64, 3, 1, 1, bias=True) self.Conv6 = nn.Conv2d(64, 64, 3, 1, 1, bias=True) self.Conv7 = nn.Conv2d(64, 64, 3, 1, 1, bias=True) self.Conv8 = nn.Conv2d(64, 3, 3, 1, 1, bias=True) def forward(self, input_0): primals_1 = self.Conv1.weight primals_2 = self.Conv1.bias primals_4 = self.Conv2.weight primals_5 = self.Conv2.bias primals_6 = self.Conv3.weight primals_7 = self.Conv3.bias primals_8 = self.Conv4.weight primals_9 = self.Conv4.bias primals_10 = self.Conv5.weight primals_11 = self.Conv5.bias primals_12 = self.Conv6.weight primals_13 = self.Conv6.bias primals_14 = self.Conv7.weight primals_15 = self.Conv7.bias primals_16 = self.Conv8.weight primals_17 = self.Conv8.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0]
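Since SRNetNew keeps the same parameter names as SRNet, the compiled module can be checked against the eager one directly. A hedged equivalence sketch, assuming a CUDA device and that both class definitions from this entry are importable together (note that call() asserts the exact (4, 3, 64, 64) input shape):

import torch

eager = SRNet().cuda().eval()
compiled = SRNetNew().cuda().eval()
compiled.load_state_dict(eager.state_dict())   # identical parameter names

x = torch.rand(4, 3, 64, 64, device='cuda')    # shape is asserted inside call()
with torch.no_grad():
    torch.testing.assert_close(compiled(x), eager(x), rtol=1e-4, atol=1e-4)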
MayankSingal/PyTorch-Zero-Shot-Super-Resolution
SRNet
false
5590
[ "MIT" ]
1
3521b02fd338fc90eef88c551a8bed4afc54c8c6
https://github.com/MayankSingal/PyTorch-Zero-Shot-Super-Resolution/tree/3521b02fd338fc90eef88c551a8bed4afc54c8c6
import torch import torch.nn as nn import torch.optim class Model(nn.Module): def __init__(self): super().__init__() self.relu = nn.ReLU(inplace=True) self.Conv1 = nn.Conv2d(3, 64, 3, 1, 1, bias=True) self.Conv2 = nn.Conv2d(64, 64, 3, 1, 1, bias=True) self.Conv3 = nn.Conv2d(64, 64, 3, 1, 1, bias=True) self.Conv4 = nn.Conv2d(64, 64, 3, 1, 1, bias=True) self.Conv5 = nn.Conv2d(64, 64, 3, 1, 1, bias=True) self.Conv6 = nn.Conv2d(64, 64, 3, 1, 1, bias=True) self.Conv7 = nn.Conv2d(64, 64, 3, 1, 1, bias=True) self.Conv8 = nn.Conv2d(64, 3, 3, 1, 1, bias=True) def forward(self, LR_img): x = self.relu(self.Conv1(LR_img)) x = self.relu(self.Conv2(x)) x = self.relu(self.Conv3(x)) x = self.relu(self.Conv4(x)) x = self.relu(self.Conv5(x)) x = self.relu(self.Conv6(x)) x = self.relu(self.Conv7(x)) x = self.Conv8(x) SR_img = LR_img + x return SR_img def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return []
Sparsemax
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/je/cjetqbcnd7bc32n35tugus5ijwdxgd2rgnx55splotkzsvjkv2mu.py # Topologically Sorted Source Nodes: [input_2, sort, cumulative_sum_zs], Original ATen: [aten.sub, aten.sort, aten.cumsum] # Source node to ATen node mapping: # cumulative_sum_zs => cumsum # input_2 => sub # sort => sort # Graph fragment: # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %expand), kwargs = {}) # %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%sub, 1, True), kwargs = {}) # %cumsum : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%getitem_2, 1), kwargs = {}) triton_per_fused_cumsum_sort_sub_0 = async_compile.triton('triton_per_fused_cumsum_sort_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton_heuristics.persistent_reduction( size_hints=[64, 4], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cumsum_sort_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_cumsum_sort_sub_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (4*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = r1 tmp10 = tmp9.to(tl.int16) tmp11 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp12 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13, tmp14, = triton_helpers.sort_with_index(tmp11, tmp12, None, 1, stable=False, descending=True) tmp15 = tmp13.to(tl.float32) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp17, = tl.associative_scan((tmp16,), 1, _triton_helper_fn_add0) tl.store(out_ptr0 + (r1 + (4*x0)), tmp8, xmask) tl.store(out_ptr1 + (r1 + (4*x0)), tmp13, xmask) tl.store(out_ptr2 + (r1 + (4*x0)), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/rm/crmbpyujmwscp5mkcahmeli47zzzh5qct73vmu2ldmazrzxmqpsx.py # Topologically Sorted Source Nodes: [mul, bound, gt, is_gt, mul_1, max_2, zs_sparse, sum_1], Original ATen: [aten.mul, aten.add, aten.gt, aten._to_copy, aten.max, aten.sum] # Source node to ATen node mapping: # bound => add # gt => gt # is_gt => convert_element_type # max_2 => max_2 # mul => mul # mul_1 => mul_1 # sum_1 => sum_1 # zs_sparse => mul_2 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand_1, %getitem_2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Tensor](args = (%add, %cumsum), kwargs = {}) # %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%gt, torch.float32), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, %expand_1), kwargs = {}) # %max_2 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%mul_1, 1, True), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, %getitem_2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [1], True), kwargs = {}) triton_poi_fused__to_copy_add_gt_max_mul_sum_1 = async_compile.triton('triton_poi_fused__to_copy_add_gt_max_mul_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, 
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_gt_max_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_gt_max_mul_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1)) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp12 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (2)) tmp21 = tl.broadcast_to(tmp20, [XBLOCK]) tmp22 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr0 + (3)) tmp31 = tl.broadcast_to(tmp30, [XBLOCK]) tmp32 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp35 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp4 = 1.0 tmp5 = tmp3 + tmp4 tmp7 = tmp5 > tmp6 tmp8 = tmp7.to(tl.float32) tmp9 = tmp8 * tmp1 tmp13 = tmp11 * tmp12 tmp14 = tmp13 + tmp4 tmp16 = tmp14 > tmp15 tmp17 = tmp16.to(tl.float32) tmp18 = tmp17 * tmp11 tmp19 = triton_helpers.maximum(tmp9, tmp18) tmp23 = tmp21 * tmp22 tmp24 = tmp23 + tmp4 tmp26 = tmp24 > tmp25 tmp27 = tmp26.to(tl.float32) tmp28 = tmp27 * tmp21 tmp29 = triton_helpers.maximum(tmp19, tmp28) tmp33 = tmp31 * tmp32 tmp34 = tmp33 + tmp4 tmp36 = tmp34 > tmp35 tmp37 = tmp36.to(tl.float32) tmp38 = tmp37 * tmp31 tmp39 = triton_helpers.maximum(tmp29, tmp38) tmp40 = tmp8 * tmp2 tmp41 = tmp17 * tmp12 tmp42 = tmp40 + tmp41 tmp43 = tmp27 * tmp22 tmp44 = tmp42 + tmp43 tmp45 = tmp37 * tmp32 tmp46 = tmp44 + tmp45 tl.store(out_ptr0 + (x0), tmp39, xmask) tl.store(out_ptr1 + (x0), tmp46, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/xz/cxze32rtorhwy2jkafxfvxxeyi2jg24j22ihfkfwtfd22dqgy2go.py # Topologically Sorted Source Nodes: [zeros_like, sub_2, max_3], Original ATen: [aten.zeros_like, aten.sub, aten.maximum] # Source node to ATen node mapping: # max_3 => maximum # sub_2 => sub_2 # zeros_like => full_default # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([64, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, 
pin_memory: False}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %expand_2), kwargs = {}) # %maximum : [num_users=2] = call_function[target=torch.ops.aten.maximum.default](args = (%full_default, %sub_2), kwargs = {}) triton_poi_fused_maximum_sub_zeros_like_2 = async_compile.triton('triton_poi_fused_maximum_sub_zeros_like_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_maximum_sub_zeros_like_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_maximum_sub_zeros_like_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp1 - tmp2 tmp5 = tmp3 / tmp4 tmp6 = tmp0 - tmp5 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp7, tmp6) tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [input_2, sort, cumulative_sum_zs], Original ATen: [aten.sub, aten.sort, aten.cumsum] stream0 = get_raw_stream(0) triton_per_fused_cumsum_sort_sub_0.run(arg0_1, buf0, buf1, buf5, 64, 4, grid=grid(64), stream=stream0) del arg0_1 # Topologically Sorted Source Nodes: [range_1], Original ATen: [aten.range] buf3 = torch.ops.aten.range.step(1, 4, dtype=torch.float32, layout=torch.strided, device=device(type='cuda', index=0)) buf4 = buf3 del buf3 buf6 = empty_strided_cuda((64, 1), (1, 64), torch.float32) buf7 = empty_strided_cuda((64, 1), (1, 64), torch.float32) # Topologically Sorted Source Nodes: [mul, bound, gt, is_gt, mul_1, max_2, zs_sparse, sum_1], Original ATen: [aten.mul, aten.add, aten.gt, 
aten._to_copy, aten.max, aten.sum] triton_poi_fused__to_copy_add_gt_max_mul_sum_1.run(buf4, buf1, buf5, buf6, buf7, 64, grid=grid(64), stream=stream0) del buf1 del buf4 buf8 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [zeros_like, sub_2, max_3], Original ATen: [aten.zeros_like, aten.sub, aten.maximum] triton_poi_fused_maximum_sub_zeros_like_2.run(buf0, buf7, buf6, buf8, 256, grid=grid(256), stream=stream0) del buf0 del buf6 del buf7 return (reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Sparsemax(nn.Module): """Sparsemax function.""" def __init__(self, dim=None): """Initialize sparsemax activation Args: dim (int, optional): The dimension over which to apply the sparsemax function. """ super(Sparsemax, self).__init__() self.dim = -1 if dim is None else dim def forward(self, input): """Forward function. Args: input (torch.Tensor): Input tensor. First dimension should be the batch size Returns: torch.Tensor: [batch_size x number_of_logits] Output tensor """ original_size = input.size() input = input.view(-1, input.size(self.dim)) dim = 1 number_of_logits = input.size(dim) input = input - torch.max(input, dim=dim, keepdim=True)[0].expand_as( input) zs = torch.sort(input=input, dim=dim, descending=True)[0] range = torch.range(start=1, end=number_of_logits, device=input.device ).view(1, -1) range = range.expand_as(zs) bound = 1 + range * zs cumulative_sum_zs = torch.cumsum(zs, dim) is_gt = torch.gt(bound, cumulative_sum_zs).type(input.type()) k = torch.max(is_gt * range, dim, keepdim=True)[0] zs_sparse = is_gt * zs taus = (torch.sum(zs_sparse, dim, keepdim=True) - 1) / k taus = taus.expand_as(input) self.output = torch.max(torch.zeros_like(input), input - taus) output = self.output.view(original_size) return output def backward(self, grad_output): """Backward function.""" dim = 1 nonzeros = torch.ne(self.output, 0) sum = torch.sum(grad_output * nonzeros, dim=dim) / torch.sum(nonzeros, dim=dim) self.grad_input = nonzeros * (grad_output - sum.expand_as(grad_output)) return self.grad_input def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
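Two properties worth checking for the Sparsemax forward above: the output lies on the probability simplex, and small logits are clipped to exactly zero, unlike softmax. Note that the forward uses torch.range, which is deprecated in favor of torch.arange and, unlike arange, includes its end point; the code relies on that inclusive behavior. A quick property sketch:

import torch

sparsemax = Sparsemax(dim=-1)
logits = torch.tensor([[2.0, 1.0, 0.1, -1.0]])
probs = sparsemax(logits)

assert torch.all(probs >= 0)
assert torch.allclose(probs.sum(dim=-1), torch.ones(1), atol=1e-6)
print(probs)   # mass concentrates on the largest logit; the rest are exactly 0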
import torch from torch import device import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton.jit def triton_per_fused_cumsum_sort_sub_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = r1 tmp10 = tmp9.to(tl.int16) tmp11 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp12 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13, _tmp14 = triton_helpers.sort_with_index(tmp11, tmp12, None, 1, stable=False, descending=True) tmp15 = tmp13.to(tl.float32) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp17, = tl.associative_scan((tmp16,), 1, _triton_helper_fn_add0) tl.store(out_ptr0 + (r1 + 4 * x0), tmp8, xmask) tl.store(out_ptr1 + (r1 + 4 * x0), tmp13, xmask) tl.store(out_ptr2 + (r1 + 4 * x0), tmp17, xmask) @triton.jit def triton_poi_fused__to_copy_add_gt_max_mul_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + 1) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp12 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr0 + 2) tmp21 = tl.broadcast_to(tmp20, [XBLOCK]) tmp22 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp30 = tl.load(in_ptr0 + 3) tmp31 = tl.broadcast_to(tmp30, [XBLOCK]) tmp32 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp35 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tmp1 * tmp2 tmp4 = 1.0 tmp5 = tmp3 + tmp4 tmp7 = tmp5 > tmp6 tmp8 = tmp7.to(tl.float32) tmp9 = tmp8 * tmp1 tmp13 = tmp11 * tmp12 tmp14 = tmp13 + tmp4 tmp16 = tmp14 > tmp15 tmp17 = tmp16.to(tl.float32) tmp18 = tmp17 * tmp11 tmp19 = triton_helpers.maximum(tmp9, tmp18) tmp23 = tmp21 * tmp22 tmp24 = tmp23 + tmp4 tmp26 = tmp24 > tmp25 tmp27 = tmp26.to(tl.float32) tmp28 = tmp27 * tmp21 tmp29 = triton_helpers.maximum(tmp19, tmp28) 
tmp33 = tmp31 * tmp32 tmp34 = tmp33 + tmp4 tmp36 = tmp34 > tmp35 tmp37 = tmp36.to(tl.float32) tmp38 = tmp37 * tmp31 tmp39 = triton_helpers.maximum(tmp29, tmp38) tmp40 = tmp8 * tmp2 tmp41 = tmp17 * tmp12 tmp42 = tmp40 + tmp41 tmp43 = tmp27 * tmp22 tmp44 = tmp42 + tmp43 tmp45 = tmp37 * tmp32 tmp46 = tmp44 + tmp45 tl.store(out_ptr0 + x0, tmp39, xmask) tl.store(out_ptr1 + x0, tmp46, xmask) @triton.jit def triton_poi_fused_maximum_sub_zeros_like_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp1 - tmp2 tmp5 = tmp3 / tmp4 tmp6 = tmp0 - tmp5 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp7, tmp6) tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused_cumsum_sort_sub_0[grid(64)](arg0_1, buf0, buf1, buf5, 64, 4, XBLOCK=8, num_warps=2, num_stages=1) del arg0_1 buf3 = torch.ops.aten.range.step(1, 4, dtype=torch.float32, layout= torch.strided, device=device(type='cuda', index=0)) buf4 = buf3 del buf3 buf6 = empty_strided_cuda((64, 1), (1, 64), torch.float32) buf7 = empty_strided_cuda((64, 1), (1, 64), torch.float32) triton_poi_fused__to_copy_add_gt_max_mul_sum_1[grid(64)](buf4, buf1, buf5, buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf1 del buf4 buf8 = buf5 del buf5 triton_poi_fused_maximum_sub_zeros_like_2[grid(256)](buf0, buf7, buf6, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del buf6 del buf7 return reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf8 class SparsemaxNew(nn.Module): """Sparsemax function.""" def __init__(self, dim=None): """Initialize sparsemax activation Args: dim (int, optional): The dimension over which to apply the sparsemax function. """ super(SparsemaxNew, self).__init__() self.dim = -1 if dim is None else dim def backward(self, grad_output): """Backward function.""" dim = 1 nonzeros = torch.ne(self.output, 0) sum = torch.sum(grad_output * nonzeros, dim=dim) / torch.sum(nonzeros, dim=dim) self.grad_input = nonzeros * (grad_output - sum.expand_as(grad_output)) return self.grad_input def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Max-luo-song/fs-map-project
Sparsemax
false
5591
[ "Apache-2.0" ]
1
4e9d86e182d9a4b969e86b12d72f227e4fd4fd09
https://github.com/Max-luo-song/fs-map-project/tree/4e9d86e182d9a4b969e86b12d72f227e4fd4fd09
import torch import torch.nn as nn class Model(nn.Module): """Sparsemax function.""" def __init__(self, dim=None): """Initialize sparsemax activation Args: dim (int, optional): The dimension over which to apply the sparsemax function. """ super().__init__() self.dim = -1 if dim is None else dim def forward(self, input): """Forward function. Args: input (torch.Tensor): Input tensor. First dimension should be the batch size Returns: torch.Tensor: [batch_size x number_of_logits] Output tensor """ original_size = input.size() input = input.view(-1, input.size(self.dim)) dim = 1 number_of_logits = input.size(dim) input = input - torch.max(input, dim=dim, keepdim=True)[0].expand_as( input) zs = torch.sort(input=input, dim=dim, descending=True)[0] range = torch.range(start=1, end=number_of_logits, device=input.device ).view(1, -1) range = range.expand_as(zs) bound = 1 + range * zs cumulative_sum_zs = torch.cumsum(zs, dim) is_gt = torch.gt(bound, cumulative_sum_zs).type(input.type()) k = torch.max(is_gt * range, dim, keepdim=True)[0] zs_sparse = is_gt * zs taus = (torch.sum(zs_sparse, dim, keepdim=True) - 1) / k taus = taus.expand_as(input) self.output = torch.max(torch.zeros_like(input), input - taus) output = self.output.view(original_size) return output def backward(self, grad_output): """Backward function.""" dim = 1 nonzeros = torch.ne(self.output, 0) sum = torch.sum(grad_output * nonzeros, dim=dim) / torch.sum(nonzeros, dim=dim) self.grad_input = nonzeros * (grad_output - sum.expand_as(grad_output)) return self.grad_input def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
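To make the threshold computation in the Sparsemax entries concrete, here is the tau calculation worked by hand for one row of logits, following exactly the steps in forward (sort, bound, cumulative sum, support size k, threshold tau):

import torch

# logits [2.0, 1.0, 0.1, -1.0] after max-subtraction, already sorted descending
z = torch.tensor([0.0, -1.0, -1.9, -3.0])
k = torch.arange(1, 5, dtype=torch.float)
bound = 1 + k * z                          # [1.0, -1.0, -4.7, -11.0]
csum = torch.cumsum(z, 0)                  # [0.0, -1.0, -2.9, -5.9]
support = bound > csum                     # [True, False, False, False]
k_z = (support.float() * k).max()          # support size: 1
tau = (z[support].sum() - 1) / k_z         # (0 - 1) / 1 = -1.0
p = torch.clamp(z - tau, min=0)            # [1.0, 0.0, 0.0, 0.0]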
NTXent
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/fh/cfhnguw4v6uy4ysjg54ojclakwi3bj2lte6oqizl4rpf4lcxpiyp.py # Topologically Sorted Source Nodes: [emb], Original ATen: [aten.div] # Source node to ATen node mapping: # emb => div # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %expand), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = 
tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/lv/clvvd5zl3dk2doqn4hqb7xws2n2pplfd2c6z2bjtqzpgmwmcb6bi.py # Topologically Sorted Source Nodes: [sim_mat], Original ATen: [aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul] # Source node to ATen node mapping: # sim_mat => clamp_min_1, clamp_min_2, div_1, div_2, mul, pow_3, pow_4, pow_5, pow_6, sum_2, sum_3 # Graph fragment: # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%expand_2, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [2], True), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_4, 1e-08), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%expand_2, %clamp_min_1), kwargs = {}) # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%expand_1, 2), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, [2], True), kwargs = {}) # %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 0.5), kwargs = {}) # %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_6, 1e-08), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%expand_1, %clamp_min_2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %div_1), kwargs = {}) triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_1 = async_compile.triton('triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 256) x4 = xindex % 64 x0 = xindex % 16 x6 = xindex % 256 x2 = (xindex // 64) % 4 x7 = xindex tmp0 = tl.load(in_ptr0 + (x4 + (64*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (x0 + (64*x3)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x3)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x3)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x3)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (x6), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tmp18 = tmp17 * tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = libdevice.sqrt(tmp27) tmp29 = triton_helpers.maximum(tmp28, tmp13) tmp30 = tmp16 / tmp29 tmp31 = tmp15 * tmp30 tl.store(out_ptr0 + (x7), tmp31, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/zb/czbffbfo4mbxwezgrsoj7ucwdqx2cq7uw73rnvx3wpkm4bu6o5k4.py # Topologically Sorted Source Nodes: [sim_mat, truediv, sim_mat_1, mul, pos, sum_2, inner, log, loss, loss_1], Original ATen: [aten.sum, aten.div, aten.exp, aten.mul, aten.log, aten.neg, aten.mean] # Source node to ATen node mapping: # inner => div_4 # log => log # loss => neg # loss_1 => mean # mul => mul_1 # pos => sum_5 # sim_mat => sum_4 # sim_mat_1 => exp # sum_2 => sum_6 # truediv => div_3 # Graph fragment: # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2]), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_4, 0.5), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_3,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %arg1_1), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1]), kwargs = {}) # %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_5, %sum_6), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div_4,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%neg,), kwargs = {}) 
triton_per_fused_div_exp_log_mean_mul_neg_sum_2 = async_compile.triton('triton_per_fused_div_exp_log_mean_mul_neg_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_exp_log_mean_mul_neg_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_exp_log_mean_mul_neg_sum_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + (256*r1)), None) tmp1 = tl.load(in_ptr0 + (16 + r0 + (256*r1)), None) tmp3 = tl.load(in_ptr0 + (32 + r0 + (256*r1)), None) tmp5 = tl.load(in_ptr0 + (48 + r0 + (256*r1)), None) tmp10 = tl.load(in_ptr1 + (r0 + (64*r1)), None) tmp12 = tl.load(in_ptr0 + (64 + r0 + (256*r1)), None) tmp13 = tl.load(in_ptr0 + (80 + r0 + (256*r1)), None) tmp15 = tl.load(in_ptr0 + (96 + r0 + (256*r1)), None) tmp17 = tl.load(in_ptr0 + (112 + r0 + (256*r1)), None) tmp21 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None) tmp24 = tl.load(in_ptr0 + (128 + r0 + (256*r1)), None) tmp25 = tl.load(in_ptr0 + (144 + r0 + (256*r1)), None) tmp27 = tl.load(in_ptr0 + (160 + r0 + (256*r1)), None) tmp29 = tl.load(in_ptr0 + (176 + r0 + (256*r1)), None) tmp33 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None) tmp36 = tl.load(in_ptr0 + (192 + r0 + (256*r1)), None) tmp37 = tl.load(in_ptr0 + (208 + r0 + (256*r1)), None) tmp39 = tl.load(in_ptr0 + (224 + r0 + (256*r1)), None) tmp41 = tl.load(in_ptr0 + (240 + r0 + (256*r1)), None) tmp45 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 2.0 tmp8 = tmp6 * tmp7 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 * tmp10 tmp14 = tmp12 + tmp13 tmp16 = tmp14 + tmp15 tmp18 = tmp16 + tmp17 tmp19 = tmp18 * tmp7 tmp20 = tl_math.exp(tmp19) tmp22 = tmp20 * tmp21 tmp23 = tmp11 + tmp22 tmp26 = tmp24 + tmp25 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp31 = tmp30 * tmp7 tmp32 = tl_math.exp(tmp31) tmp34 = tmp32 
* tmp33 tmp35 = tmp23 + tmp34 tmp38 = tmp36 + tmp37 tmp40 = tmp38 + tmp39 tmp42 = tmp40 + tmp41 tmp43 = tmp42 * tmp7 tmp44 = tl_math.exp(tmp43) tmp46 = tmp44 * tmp45 tmp47 = tmp35 + tmp46 tmp48 = tmp9 + tmp20 tmp49 = tmp48 + tmp32 tmp50 = tmp49 + tmp44 tmp51 = tmp47 / tmp50 tmp52 = tl_math.log(tmp51) tmp53 = -tmp52 tmp54 = tl.broadcast_to(tmp53, [XBLOCK, RBLOCK]) tmp56 = tl.sum(tmp54, 1)[:, None] tmp57 = 64.0 tmp58 = tmp56 / tmp57 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp58, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [emb], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sim_mat], Original ATen: [aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul] triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_1.run(buf0, buf1, 1024, grid=grid(1024), stream=stream0) del buf0 buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [sim_mat, truediv, sim_mat_1, mul, pos, sum_2, inner, log, loss, loss_1], Original ATen: [aten.sum, aten.div, aten.exp, aten.mul, aten.log, aten.neg, aten.mean] triton_per_fused_div_exp_log_mean_mul_neg_sum_2.run(buf5, buf1, arg1_1, 1, 64, grid=grid(1), stream=stream0) del arg1_1 del buf1 return (buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class NTXent(nn.Module):

    def __init__(self, metric: 'str'='CosineSimilarity',
                 temperature: 'float'=0.5, reduction: 'str'='mean'):
        super().__init__()
        if metric not in ['CosineSimilarity']:
            raise ValueError('Undefined metric!')
        if reduction not in ['AvgNonInf', 'mean', 'sum', 'none']:
            raise ValueError('Undefined reduction!')
        self.metric = metric
        self.t = temperature
        self.reduction = reduction

    def forward(self, embedding, label):
        if self.metric == 'CosineSimilarity':
            emb = F.normalize(embedding, dim=1)
            sim_mat = F.cosine_similarity(emb.unsqueeze(1),
                                          emb.unsqueeze(0), dim=2)
            sim_mat = torch.exp(sim_mat / self.t)
        pos = torch.sum(sim_mat * label, dim=1)
        inner = pos / torch.sum(sim_mat, dim=1)
        loss = -torch.log(inner)
        if self.reduction == 'AvgNonInf':
            non_inf = inner > 0
            loss = loss * non_inf
            loss = (loss.sum() / torch.sum(non_inf)
                    if torch.sum(non_inf) > 0 else loss.mean())
        elif self.reduction == 'mean':
            loss = loss.mean()
        elif self.reduction == 'sum':
            loss = loss.sum()
        elif self.reduction == 'none':
            pass
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 256 x4 = xindex % 64 x0 = xindex % 16 x6 = xindex % 256 x2 = xindex // 64 % 4 x7 = xindex tmp0 = tl.load(in_ptr0 + (x4 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (x0 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr0 + x6, xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp22 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tmp18 = tmp17 * tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = libdevice.sqrt(tmp27) tmp29 = triton_helpers.maximum(tmp28, tmp13) tmp30 = tmp16 / tmp29 tmp31 = tmp15 * tmp30 tl.store(out_ptr0 + x7, tmp31, xmask) @triton.jit def triton_per_fused_div_exp_log_mean_mul_neg_sum_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 256 * r1), None) tmp1 = tl.load(in_ptr0 + (16 + r0 + 
256 * r1), None) tmp3 = tl.load(in_ptr0 + (32 + r0 + 256 * r1), None) tmp5 = tl.load(in_ptr0 + (48 + r0 + 256 * r1), None) tmp10 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp12 = tl.load(in_ptr0 + (64 + r0 + 256 * r1), None) tmp13 = tl.load(in_ptr0 + (80 + r0 + 256 * r1), None) tmp15 = tl.load(in_ptr0 + (96 + r0 + 256 * r1), None) tmp17 = tl.load(in_ptr0 + (112 + r0 + 256 * r1), None) tmp21 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp24 = tl.load(in_ptr0 + (128 + r0 + 256 * r1), None) tmp25 = tl.load(in_ptr0 + (144 + r0 + 256 * r1), None) tmp27 = tl.load(in_ptr0 + (160 + r0 + 256 * r1), None) tmp29 = tl.load(in_ptr0 + (176 + r0 + 256 * r1), None) tmp33 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp36 = tl.load(in_ptr0 + (192 + r0 + 256 * r1), None) tmp37 = tl.load(in_ptr0 + (208 + r0 + 256 * r1), None) tmp39 = tl.load(in_ptr0 + (224 + r0 + 256 * r1), None) tmp41 = tl.load(in_ptr0 + (240 + r0 + 256 * r1), None) tmp45 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 2.0 tmp8 = tmp6 * tmp7 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 * tmp10 tmp14 = tmp12 + tmp13 tmp16 = tmp14 + tmp15 tmp18 = tmp16 + tmp17 tmp19 = tmp18 * tmp7 tmp20 = tl_math.exp(tmp19) tmp22 = tmp20 * tmp21 tmp23 = tmp11 + tmp22 tmp26 = tmp24 + tmp25 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp31 = tmp30 * tmp7 tmp32 = tl_math.exp(tmp31) tmp34 = tmp32 * tmp33 tmp35 = tmp23 + tmp34 tmp38 = tmp36 + tmp37 tmp40 = tmp38 + tmp39 tmp42 = tmp40 + tmp41 tmp43 = tmp42 * tmp7 tmp44 = tl_math.exp(tmp43) tmp46 = tmp44 * tmp45 tmp47 = tmp35 + tmp46 tmp48 = tmp9 + tmp20 tmp49 = tmp48 + tmp32 tmp50 = tmp49 + tmp44 tmp51 = tmp47 / tmp50 tmp52 = tl_math.log(tmp51) tmp53 = -tmp52 tmp54 = tl.broadcast_to(tmp53, [XBLOCK, RBLOCK]) tmp56 = tl.sum(tmp54, 1)[:, None] tmp57 = 64.0 tmp58 = tmp56 / tmp57 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp58, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_1[grid(1024)]( buf0, buf1, 1024, XBLOCK=128, num_warps=4, num_stages=1) del buf0 buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4 del buf4 triton_per_fused_div_exp_log_mean_mul_neg_sum_2[grid(1)](buf5, buf1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 del buf1 return buf5, class NTXentNew(nn.Module): def __init__(self, metric: 'str'='CosineSimilarity', temperature: 'float'=0.5, reduction: 'str'='mean'): super().__init__() if metric not in ['CosineSimilarity']: raise ValueError('Undefined metric!') if reduction not in ['AvgNonInf', 'mean', 'sum', 'none']: raise ValueError('Undefined reduction!') self.metric = metric self.t = temperature self.reduction = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Meteor-han/ReLMole
NTXent
false
5592
[ "MIT" ]
1
ec8f2d3ec7b8edb6cd34aede36a980bab3dc35c2
https://github.com/Meteor-han/ReLMole/tree/ec8f2d3ec7b8edb6cd34aede36a980bab3dc35c2
import torch
import torch.nn as nn
import torch.nn.functional as F


class Model(nn.Module):

    def __init__(self, metric: 'str'='CosineSimilarity',
                 temperature: 'float'=0.5, reduction: 'str'='mean'):
        super().__init__()
        if metric not in ['CosineSimilarity']:
            raise ValueError('Undefined metric!')
        if reduction not in ['AvgNonInf', 'mean', 'sum', 'none']:
            raise ValueError('Undefined reduction!')
        self.metric = metric
        self.t = temperature
        self.reduction = reduction

    def forward(self, embedding, label):
        if self.metric == 'CosineSimilarity':
            emb = F.normalize(embedding, dim=1)
            sim_mat = F.cosine_similarity(emb.unsqueeze(1),
                                          emb.unsqueeze(0), dim=2)
            sim_mat = torch.exp(sim_mat / self.t)
        pos = torch.sum(sim_mat * label, dim=1)
        inner = pos / torch.sum(sim_mat, dim=1)
        loss = -torch.log(inner)
        if self.reduction == 'AvgNonInf':
            non_inf = inner > 0
            loss = loss * non_inf
            loss = (loss.sum() / torch.sum(non_inf)
                    if torch.sum(non_inf) > 0 else loss.mean())
        elif self.reduction == 'mean':
            loss = loss.mean()
        elif self.reduction == 'sum':
            loss = loss.sum()
        elif self.reduction == 'none':
            pass
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
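One detail worth noticing in this record: the eager module divides by the temperature (sim_mat / self.t with t = 0.5), while the fused kernel triton_per_fused_div_exp_log_mean_mul_neg_sum_2 folds that division into a multiply by a constant (tmp7 = 2.0) before the exp. A minimal equivalence sketch, assuming a CUDA device and that both NTXent (the eager module) and NTXentNew (the Triton-backed wrapper) from the listings above are in scope; the (4, 4, 4, 4) shape is required because the compiled kernels hard-code it:

import torch

if torch.cuda.is_available():
    emb = torch.rand(4, 4, 4, 4, device='cuda')
    label = torch.rand(4, 4, 4, 4, device='cuda')
    eager = NTXent()(emb, label)     # reference: exp(cos_sim / 0.5), mean reduction
    fused = NTXentNew()(emb, label)  # compiled path via call()
    torch.testing.assert_close(eager, fused, rtol=1e-4, atol=1e-5)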
GlobalAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/r6/cr6neze6yovkog6kjrk5k2db63h47ozkojywfys6karxe7dlumrz.py # Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax] # Source node to ATen node mapping: # align_vectors => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < 
xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py # Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax] # Source node to ATen node mapping: # align_vectors => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ip/cip3p4ibqio6uu76ccsemd7wjusq5ptlow3dt2zxzouyuz2sqywf.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : 
[num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%bmm_1, %primals_1], 2), kwargs = {}) triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/f5/cf5pnuv5il7avsmzck3quom7r6zvcfuulsdwpzlv2epzfmcgqgwb.py # Topologically Sorted Source Nodes: [attn_h_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # attn_h_2 => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), 
equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + (x3), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/u4/cu4fypgfipklcxtitafatnyqdaatx5tws6qfndqotcy4qivcph6d.py # Topologically Sorted Source Nodes: [align_vectors_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # align_vectors_2 => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) 
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [align], Original ATen: [aten.bmm] extern_kernels.bmm(primals_1, reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), out=buf0) buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [c], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), primals_2, out=buf3) del primals_2 buf4 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(buf3, primals_1, buf4, 128, grid=grid(128), stream=stream0) del primals_1 buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf4, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf5) del primals_3 buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_h_2], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf5, buf6, 64, grid=grid(64), stream=stream0) buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [align_vectors_2], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf2, buf7, 64, grid=grid(64), stream=stream0) del buf2 return (buf6, buf7, reinterpret_tensor(buf4, (16, 8), (8, 1), 0), buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.cuda
import torch.distributed


def aeq(*args):
    """
    Assert all arguments have the same value
    """
    arguments = (arg for arg in args)
    first = next(arguments)
    assert all(arg == first for arg in arguments), \
        'Not all arguments have the same value: ' + str(args)


def sequence_mask(lengths, max_len=None):
    """
    Creates a boolean mask from sequence lengths.
    """
    batch_size = lengths.numel()
    max_len = max_len or lengths.max()
    return torch.arange(0, max_len, device=lengths.device).type_as(lengths) \
        .repeat(batch_size, 1).lt(lengths.unsqueeze(1))


class GlobalAttention(nn.Module):
    """
    Global attention takes a matrix and a query vector. It then computes
    a parameterized convex combination of the matrix based on the input
    query.

    Constructs a unit mapping a query `q` of size `dim` and a source
    matrix `H` of size `n x dim`, to an output of size `dim`.

    .. mermaid::

       graph BT
          A[Query]
          subgraph RNN
            C[H 1]
            D[H 2]
            E[H N]
          end
          F[Attn]
          G[Output]
          A --> F
          C --> F
          D --> F
          E --> F
          C -.-> G
          D -.-> G
          E -.-> G
          F --> G

    All models compute the output as
    :math:`c = \\sum_{j=1}^{\\text{SeqLength}} a_j H_j` where
    :math:`a_j` is the softmax of a score function.
    Then they apply a projection layer to [q, c].

    However they differ on how they compute the attention score.

    * Luong Attention (dot, general):
       * dot: :math:`\\text{score}(H_j,q) = H_j^T q`
       * general: :math:`\\text{score}(H_j, q) = H_j^T W_a q`

    * Bahdanau Attention (mlp):
       * :math:`\\text{score}(H_j, q) = v_a^T \\text{tanh}(W_a q + U_a h_j)`

    Args:
       dim (int): dimensionality of query and key
       coverage (bool): use coverage term
       attn_type (str): type of attention to use, options [dot,general,mlp]
       attn_func (str): attention function to use, options [softmax,sparsemax]
    """

    def __init__(self, dim, coverage=False, attn_type='dot',
                 attn_func='softmax'):
        super(GlobalAttention, self).__init__()
        self.dim = dim
        assert attn_type in ['dot', 'general', 'mlp'], \
            'Please select a valid attention type (got {:s}).'.format(attn_type)
        self.attn_type = attn_type
        assert attn_func in ['softmax', 'sparsemax'], \
            'Please select a valid attention function.'
        self.attn_func = attn_func
        if self.attn_type == 'general':
            self.linear_in = nn.Linear(dim, dim, bias=False)
        elif self.attn_type == 'mlp':
            self.linear_context = nn.Linear(dim, dim, bias=False)
            self.linear_query = nn.Linear(dim, dim, bias=True)
            self.v = nn.Linear(dim, 1, bias=False)
        out_bias = self.attn_type == 'mlp'
        self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias)
        if coverage:
            self.linear_cover = nn.Linear(1, dim, bias=False)

    def score(self, h_t, h_s):
        """
        Args:
          h_t (FloatTensor): sequence of queries ``(batch, tgt_len, dim)``
          h_s (FloatTensor): sequence of sources ``(batch, src_len, dim)``

        Returns:
          FloatTensor: raw attention scores (unnormalized) for each src index
            ``(batch, tgt_len, src_len)``
        """
        src_batch, src_len, src_dim = h_s.size()
        tgt_batch, tgt_len, tgt_dim = h_t.size()
        aeq(src_batch, tgt_batch)
        aeq(src_dim, tgt_dim)
        aeq(self.dim, src_dim)
        if self.attn_type in ['general', 'dot']:
            if self.attn_type == 'general':
                h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
                h_t_ = self.linear_in(h_t_)
                h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
            h_s_ = h_s.transpose(1, 2)
            return torch.bmm(h_t, h_s_)
        else:
            dim = self.dim
            wq = self.linear_query(h_t.view(-1, dim))
            wq = wq.view(tgt_batch, tgt_len, 1, dim)
            wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
            uh = self.linear_context(h_s.contiguous().view(-1, dim))
            uh = uh.view(src_batch, 1, src_len, dim)
            uh = uh.expand(src_batch, tgt_len, src_len, dim)
            wquh = torch.tanh(wq + uh)
            return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)

    def forward(self, source, memory_bank, memory_lengths=None, coverage=None):
        """
        Args:
          source (FloatTensor): query vectors ``(batch, tgt_len, dim)``
          memory_bank (FloatTensor): source vectors ``(batch, src_len, dim)``
          memory_lengths (LongTensor): the source context lengths ``(batch,)``
          coverage (FloatTensor): None (not supported yet)

        Returns:
          (FloatTensor, FloatTensor):

          * Computed vector ``(tgt_len, batch, dim)``
          * Attention distributions for each query
            ``(tgt_len, batch, src_len)``
        """
        if source.dim() == 2:
            one_step = True
            source = source.unsqueeze(1)
        else:
            one_step = False
        batch, source_l, dim = memory_bank.size()
        batch_, target_l, dim_ = source.size()
        aeq(batch, batch_)
        aeq(dim, dim_)
        aeq(self.dim, dim)
        if coverage is not None:
            batch_, source_l_ = coverage.size()
            aeq(batch, batch_)
            aeq(source_l, source_l_)
        if coverage is not None:
            cover = coverage.view(-1).unsqueeze(1)
            memory_bank += self.linear_cover(cover).view_as(memory_bank)
            memory_bank = torch.tanh(memory_bank)
        align = self.score(source, memory_bank)
        if memory_lengths is not None:
            mask = sequence_mask(memory_lengths, max_len=align.size(-1))
            mask = mask.unsqueeze(1)
            align.masked_fill_(~mask, -float('inf'))
        if self.attn_func == 'softmax':
            align_vectors = F.softmax(align.view(batch * target_l, source_l), -1)
        else:
            # NOTE: sparsemax is not defined in this snippet; in OpenNMT-py
            # it is imported from onmt.modules.sparse_activations.
            align_vectors = sparsemax(align.view(batch * target_l, source_l), -1)
        align_vectors = align_vectors.view(batch, target_l, source_l)
        c = torch.bmm(align_vectors, memory_bank)
        concat_c = torch.cat([c, source], 2).view(batch * target_l, dim * 2)
        attn_h = self.linear_out(concat_c).view(batch, target_l, dim)
        if self.attn_type in ['general', 'dot']:
            attn_h = torch.tanh(attn_h)
        if one_step:
            attn_h = attn_h.squeeze(1)
            align_vectors = align_vectors.squeeze(1)
            batch_, dim_ = attn_h.size()
            aeq(batch, batch_)
            aeq(dim, dim_)
            batch_, source_l_ = align_vectors.size()
            aeq(batch, batch_)
            aeq(source_l, source_l_)
        else:
            attn_h = attn_h.transpose(0, 1).contiguous()
            align_vectors = align_vectors.transpose(0, 1).contiguous()
            target_l_, batch_, dim_ = attn_h.size()
            aeq(target_l, target_l_)
            aeq(batch, batch_)
            aeq(dim, dim_)
            target_l_, batch_, source_l_ = align_vectors.size()
            aeq(target_l, target_l_)
            aeq(batch, batch_)
            aeq(source_l, source_l_)
        return attn_h, align_vectors


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.cuda import torch.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + x3, tmp1, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(primals_1, reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), out=buf0) buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0) del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), primals_2, out=buf3) del primals_2 buf4 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) triton_poi_fused_cat_2[grid(128)](buf3, primals_1, buf4, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0) del buf3 extern_kernels.mm(reinterpret_tensor(buf4, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf5) del primals_3 buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(64)](buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_4[grid(64)](buf2, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 return buf6, buf7, reinterpret_tensor(buf4, (16, 8), (8, 1), 0), buf5 def aeq(*args): """ Assert all arguments have the same value """ arguments = (arg for arg in args) first = next(arguments) assert all(arg == first for arg in arguments ), 'Not all arguments have the same value: ' + str(args) def sequence_mask(lengths, max_len=None): """ Creates a boolean mask from sequence lengths. 
""" batch_size = lengths.numel() max_len = max_len or lengths.max() return torch.arange(0, max_len, device=lengths.device).type_as(lengths ).repeat(batch_size, 1).lt(lengths.unsqueeze(1)) class GlobalAttentionNew(nn.Module): """ Global attention takes a matrix and a query vector. It then computes a parameterized convex combination of the matrix based on the input query. Constructs a unit mapping a query `q` of size `dim` and a source matrix `H` of size `n x dim`, to an output of size `dim`. .. mermaid:: graph BT A[Query] subgraph RNN C[H 1] D[H 2] E[H N] end F[Attn] G[Output] A --> F C --> F D --> F E --> F C -.-> G D -.-> G E -.-> G F --> G All models compute the output as :math:`c = \\sum_{j=1}^{\\text{SeqLength}} a_j H_j` where :math:`a_j` is the softmax of a score function. Then then apply a projection layer to [q, c]. However they differ on how they compute the attention score. * Luong Attention (dot, general): * dot: :math:`\\text{score}(H_j,q) = H_j^T q` * general: :math:`\\text{score}(H_j, q) = H_j^T W_a q` * Bahdanau Attention (mlp): * :math:`\\text{score}(H_j, q) = v_a^T \\text{tanh}(W_a q + U_a h_j)` Args: dim (int): dimensionality of query and key coverage (bool): use coverage term attn_type (str): type of attention to use, options [dot,general,mlp] attn_func (str): attention function to use, options [softmax,sparsemax] """ def __init__(self, dim, coverage=False, attn_type='dot', attn_func= 'softmax'): super(GlobalAttentionNew, self).__init__() self.dim = dim assert attn_type in ['dot', 'general', 'mlp' ], 'Please select a valid attention type (got {:s}).'.format( attn_type) self.attn_type = attn_type assert attn_func in ['softmax', 'sparsemax' ], 'Please select a valid attention function.' self.attn_func = attn_func if self.attn_type == 'general': self.linear_in = nn.Linear(dim, dim, bias=False) elif self.attn_type == 'mlp': self.linear_context = nn.Linear(dim, dim, bias=False) self.linear_query = nn.Linear(dim, dim, bias=True) self.v = nn.Linear(dim, 1, bias=False) out_bias = self.attn_type == 'mlp' self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias) if coverage: self.linear_cover = nn.Linear(1, dim, bias=False) def score(self, h_t, h_s): """ Args: h_t (FloatTensor): sequence of queries ``(batch, tgt_len, dim)`` h_s (FloatTensor): sequence of sources ``(batch, src_len, dim`` Returns: FloatTensor: raw attention scores (unnormalized) for each src index ``(batch, tgt_len, src_len)`` """ src_batch, src_len, src_dim = h_s.size() tgt_batch, tgt_len, tgt_dim = h_t.size() aeq(src_batch, tgt_batch) aeq(src_dim, tgt_dim) aeq(self.dim, src_dim) if self.attn_type in ['general', 'dot']: if self.attn_type == 'general': h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim) h_t_ = self.linear_in(h_t_) h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim) h_s_ = h_s.transpose(1, 2) return torch.bmm(h_t, h_s_) else: dim = self.dim wq = self.linear_query(h_t.view(-1, dim)) wq = wq.view(tgt_batch, tgt_len, 1, dim) wq = wq.expand(tgt_batch, tgt_len, src_len, dim) uh = self.linear_context(h_s.contiguous().view(-1, dim)) uh = uh.view(src_batch, 1, src_len, dim) uh = uh.expand(src_batch, tgt_len, src_len, dim) wquh = torch.tanh(wq + uh) return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len) def forward(self, input_0, input_1): primals_3 = self.linear_out.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
MaxatTezekbayev/OpenNMT-py-lexical
GlobalAttention
false
5593
[ "MIT" ]
1
44182999b863fc4074d67e0281c5bdab19abddfe
https://github.com/MaxatTezekbayev/OpenNMT-py-lexical/tree/44182999b863fc4074d67e0281c5bdab19abddfe
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.cuda
import torch.distributed


def aeq(*args):
    """
    Assert all arguments have the same value
    """
    arguments = (arg for arg in args)
    first = next(arguments)
    assert all(arg == first for arg in arguments), \
        'Not all arguments have the same value: ' + str(args)


def sequence_mask(lengths, max_len=None):
    """
    Creates a boolean mask from sequence lengths.
    """
    batch_size = lengths.numel()
    max_len = max_len or lengths.max()
    return torch.arange(0, max_len, device=lengths.device).type_as(lengths) \
        .repeat(batch_size, 1).lt(lengths.unsqueeze(1))


class Model(nn.Module):
    """
    Global attention takes a matrix and a query vector. It then computes
    a parameterized convex combination of the matrix based on the input
    query.

    Constructs a unit mapping a query `q` of size `dim` and a source
    matrix `H` of size `n x dim`, to an output of size `dim`.

    .. mermaid::

       graph BT
          A[Query]
          subgraph RNN
            C[H 1]
            D[H 2]
            E[H N]
          end
          F[Attn]
          G[Output]
          A --> F
          C --> F
          D --> F
          E --> F
          C -.-> G
          D -.-> G
          E -.-> G
          F --> G

    All models compute the output as
    :math:`c = \\sum_{j=1}^{\\text{SeqLength}} a_j H_j` where
    :math:`a_j` is the softmax of a score function.
    Then they apply a projection layer to [q, c].

    However they differ on how they compute the attention score.

    * Luong Attention (dot, general):
       * dot: :math:`\\text{score}(H_j,q) = H_j^T q`
       * general: :math:`\\text{score}(H_j, q) = H_j^T W_a q`

    * Bahdanau Attention (mlp):
       * :math:`\\text{score}(H_j, q) = v_a^T \\text{tanh}(W_a q + U_a h_j)`

    Args:
       dim (int): dimensionality of query and key
       coverage (bool): use coverage term
       attn_type (str): type of attention to use, options [dot,general,mlp]
       attn_func (str): attention function to use, options [softmax,sparsemax]
    """

    def __init__(self, dim, coverage=False, attn_type='dot',
                 attn_func='softmax'):
        super().__init__()
        self.dim = dim
        assert attn_type in ['dot', 'general', 'mlp'], \
            'Please select a valid attention type (got {:s}).'.format(attn_type)
        self.attn_type = attn_type
        assert attn_func in ['softmax', 'sparsemax'], \
            'Please select a valid attention function.'
        self.attn_func = attn_func
        if self.attn_type == 'general':
            self.linear_in = nn.Linear(dim, dim, bias=False)
        elif self.attn_type == 'mlp':
            self.linear_context = nn.Linear(dim, dim, bias=False)
            self.linear_query = nn.Linear(dim, dim, bias=True)
            self.v = nn.Linear(dim, 1, bias=False)
        out_bias = self.attn_type == 'mlp'
        self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias)
        if coverage:
            self.linear_cover = nn.Linear(1, dim, bias=False)

    def score(self, h_t, h_s):
        """
        Args:
          h_t (FloatTensor): sequence of queries ``(batch, tgt_len, dim)``
          h_s (FloatTensor): sequence of sources ``(batch, src_len, dim)``

        Returns:
          FloatTensor: raw attention scores (unnormalized) for each src index
            ``(batch, tgt_len, src_len)``
        """
        src_batch, src_len, src_dim = h_s.size()
        tgt_batch, tgt_len, tgt_dim = h_t.size()
        aeq(src_batch, tgt_batch)
        aeq(src_dim, tgt_dim)
        aeq(self.dim, src_dim)
        if self.attn_type in ['general', 'dot']:
            if self.attn_type == 'general':
                h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
                h_t_ = self.linear_in(h_t_)
                h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
            h_s_ = h_s.transpose(1, 2)
            return  # ... truncated (>4000 chars) for memory efficiency
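For the default dot/softmax configuration captured by this record, the module reduces to softmax(Q @ K^T) followed by a context bmm, a concat with the query, linear_out, and tanh, which matches the kernel sequence in call() above (two softmax kernels, two bmms, the cat kernel, an mm, and a tanh clone). A minimal usage sketch, assuming the eager GlobalAttention class from the python_code listing is in scope:

import torch

attn = GlobalAttention(dim=4)      # attn_type='dot', attn_func='softmax'
source = torch.rand(4, 4, 4)       # (batch, tgt_len, dim) queries
memory_bank = torch.rand(4, 4, 4)  # (batch, src_len, dim) keys/values
attn_h, align_vectors = attn(source, memory_bank)
# Outputs are time-major per the docstring:
print(attn_h.shape)           # (tgt_len, batch, dim)
print(align_vectors.shape)    # (tgt_len, batch, src_len)
print(align_vectors.sum(-1))  # each attention row sums to ~1.0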
NetModel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/gi/cgisfp7cjgya6wz3zy6wgsfrei7pmk7oec6olmpydu3ckl4g7flh.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_2 => relu # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 300 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (300, 784), (784, 1)) assert_size_stride(primals_3, (300, ), (1, )) assert_size_stride(primals_4, (10, 300), (300, 1)) assert_size_stride(primals_5, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 300), (300, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 300), (1, 784), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 1200, grid=grid(1200), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (300, 10), (1, 300), 0), alpha=1, beta=1, out=buf2) del primals_5 return (buf2, primals_1, buf1, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 784), (784, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((300, 784), (784, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((10, 300), (300, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
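What triton_poi_fused_relu_0 above computes, restated in eager PyTorch as a sanity sketch: the kernel adds the 300-element bias to the (4, 300) matmul result in place and applies ReLU. The buffer and bias names are illustrative stand-ins for buf1 and primals_3 from the call() function.

import torch

buf = torch.randn(4, 300)    # stands in for buf1, the mm output
bias = torch.randn(300)      # stands in for primals_3, the hidden-layer bias
ref = torch.relu(buf + bias)           # eager reference
buf.add_(bias).clamp_(min=0)           # the fused in-place equivalent
assert torch.allclose(buf, ref)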
import torch import torch.nn.functional as F import torch.utils.data.dataloader class NetModel(torch.nn.Module): def __init__(self): super(NetModel, self).__init__() self.hidden = torch.nn.Linear(28 * 28, 300) self.output = torch.nn.Linear(300, 10) def forward(self, x): x = x.view(-1, 28 * 28) x = self.hidden(x) x = F.relu(x) x = self.output(x) return x def get_inputs(): return [torch.rand([4, 784])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data.dataloader assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 300 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (300, 784), (784, 1)) assert_size_stride(primals_3, (300,), (1,)) assert_size_stride(primals_4, (10, 300), (300, 1)) assert_size_stride(primals_5, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 300), (300, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 300), (1, 784), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(1200)](buf1, primals_3, 1200, XBLOCK= 128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (300, 10), (1, 300), 0), alpha=1, beta=1, out=buf2) del primals_5 return buf2, primals_1, buf1, primals_4 class NetModelNew(torch.nn.Module): def __init__(self): super(NetModelNew, self).__init__() self.hidden = torch.nn.Linear(28 * 28, 300) self.output = torch.nn.Linear(300, 10) def forward(self, input_0): primals_2 = self.hidden.weight primals_3 = self.hidden.bias primals_4 = self.output.weight primals_5 = self.output.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
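A hypothetical end-to-end check, assuming NetModel (from the eager code above) and NetModelNew are importable in one scope and a CUDA device is available, since call() allocates CUDA buffers. Copying the state dict keeps the two parameterizations identical; the tolerance allows for the slightly different mm/addmm split in the compiled path.

import torch

eager = NetModel().cuda()
compiled = NetModelNew().cuda()
compiled.load_state_dict(eager.state_dict())  # identical weights

x = torch.rand(4, 784, device='cuda')  # matches the asserted input layout
with torch.no_grad():
    assert torch.allclose(eager(x), compiled(x), atol=1e-6)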
Michaelzhouisnotwhite/Learning-Gan
NetModel
false
5,594
[ "MIT" ]
1
cf1cff1f2afba296489db55f5de9ebb8405feb0e
https://github.com/Michaelzhouisnotwhite/Learning-Gan/tree/cf1cff1f2afba296489db55f5de9ebb8405feb0e
import torch import torch.nn.functional as F import torch.utils.data.dataloader class Model(torch.nn.Module): def __init__(self): super().__init__() self.hidden = torch.nn.Linear(28 * 28, 300) self.output = torch.nn.Linear(300, 10) def forward(self, x): x = x.view(-1, 28 * 28) x = self.hidden(x) x = F.relu(x) x = self.output(x) return x def get_inputs(): return [torch.rand([4, 784])] def get_init_inputs(): return []
PairwiseLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/5e/c5eptm6xzwd32mkban3ftbqvjp4mw3gsooweshto2ug6hb7xbg5e.py # Topologically Sorted Source Nodes: [diff, mul, sum_1], Original ATen: [aten.sub, aten.mul, aten.sum] # Source node to ATen node mapping: # diff => sub # mul => mul # sum_1 => sum_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %sub), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) triton_per_fused_mul_sub_sum_0 = async_compile.triton('triton_per_fused_mul_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: 
tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [diff, mul, sum_1], Original ATen: [aten.sub, aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_mul_sub_sum_0.run(arg0_1, arg1_1, buf0, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data import torch.nn as nn import torch.nn.parallel class PairwiseLoss(nn.Module): def __init__(self): super(PairwiseLoss, self).__init__() def forward(self, x, y): diff = x - y return torch.sum(diff * diff) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
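A quick illustration (not part of the record) that this forward, the sum of squared differences, agrees with F.mse_loss under reduction='sum'; it assumes the PairwiseLoss class above is in scope.

import torch
import torch.nn.functional as F

x, y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
loss = PairwiseLoss()(x, y)
assert torch.allclose(loss, F.mse_loss(x, y, reduction='sum'))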
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch.nn as nn import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mul_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp6, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_mul_sub_sum_0[grid(1)](arg0_1, arg1_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf0, class PairwiseLossNew(nn.Module): def __init__(self): super(PairwiseLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
MinesNicaicai/large-scale-pointcloud-matching
PairwiseLoss
false
5,595
[ "MIT" ]
1
cfe140f2be1110ed75b6edd27538021e513a31c9
https://github.com/MinesNicaicai/large-scale-pointcloud-matching/tree/cfe140f2be1110ed75b6edd27538021e513a31c9
import torch import torch.utils.data import torch.nn as nn import torch.nn.parallel class Model(nn.Module): def __init__(self): super().__init__() def forward(self, x, y): diff = x - y return torch.sum(diff * diff) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
MLP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ky/cky64l574tkwxzjewzevqyhty73x4t3q4p6d2tu2humfvstjwiaa.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 
= xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/f4/cf4n4twxm5q5eh5lcjfpxlmjdbgr6xfkpjnkedxdctexycbqar7w.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_3 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ld/cldtk5skh6gtzdd62vyilgjgd55ch7o62ebbhqgbpau5cmhd5sca.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten._softmax] # Source node to ATen node mapping: # x_5 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {}) # %exp : [num_users=2] = 
call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/7v/c7vy54xspuettz5pgulxporznj2yqlyufnh2o2cvg7er4bnu4zox.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten._softmax] # Source node to ATen node mapping: # x_5 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (32, 4), (4, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (16, 32), (32, 1)) assert_size_stride(primals_5, (16, ), (1, )) assert_size_stride(primals_6, (4, 16), (16, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf0 # reuse buf8 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 2048, grid=grid(2048), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 16), (1, 32), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 16), (256, 64, 16, 1), 0); del buf2 # reuse buf7 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf7, 1024, grid=grid(1024), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: 
[aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf5, buf6, 256, grid=grid(256), stream=stream0) del buf5 return (buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(buf3, (64, 16), (16, 1), 0), buf6, primals_6, buf7, primals_4, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
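The *_threshold_backward kernels above produce two outputs: the ReLU activation, written back into the input buffer, and the boolean mask relu(x) <= 0 saved for autograd. A plain-PyTorch sketch of what one such kernel stores and how the backward pass consumes the mask; shapes and names are illustrative.

import torch

x = torch.randn(64, 32)      # stands in for the mm output (buf1)
bias = torch.randn(32)       # stands in for the layer bias

y = torch.relu(x + bias)     # written back into the input buffer
mask = y <= 0                # the bool buffer (e.g. buf8) kept for backward
grad_out = torch.randn_like(y)
grad_in = grad_out.masked_fill(mask, 0.0)  # backward zeroes masked gradients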
import torch from torch import nn import torch.utils class MLP(torch.nn.Module): def __init__(self, input_dim, output_dim): super(MLP, self).__init__() self.d1 = torch.nn.Linear(input_dim, 32) self.d2 = torch.nn.Linear(32, 16) self.d3 = torch.nn.Linear(16, output_dim) self.relu = nn.ReLU() self.softmax = nn.Softmax(dim=1) def forward(self, x): x = self.d1(x) x = self.relu(x) x = self.d2(x) x = self.relu(x) x = self.d3(x) x = self.softmax(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'output_dim': 4}]
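One detail worth noting in the model above: nn.Softmax(dim=1) on the 4-D input normalizes over dimension 1, not the last dimension, which is exactly the stride pattern the _softmax kernels in this record implement (offsets 0, 16, 32, 48 within each 64-element block). A small assumption-free check:

import torch

x = torch.rand(4, 4, 4, 4)
p = torch.softmax(x, dim=1)            # normalizes over dim 1
assert torch.allclose(p.sum(dim=1), torch.ones(4, 4, 4))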
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn import torch.utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (32, 4), (4, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), 
(64, 16, 4, 1)) assert_size_stride(primals_4, (16, 32), (32, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (4, 16), (16, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf0 buf8 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf1, primals_2, buf8, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 16), (1, 32), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 16), (256, 64, 16, 1), 0) del buf2 buf7 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(1024)](buf3, primals_5, buf7, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused__softmax_3[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor( buf3, (64, 16), (16, 1), 0), buf6, primals_6, buf7, primals_4, buf8 class MLPNew(torch.nn.Module): def __init__(self, input_dim, output_dim): super(MLPNew, self).__init__() self.d1 = torch.nn.Linear(input_dim, 32) self.d2 = torch.nn.Linear(32, 16) self.d3 = torch.nn.Linear(16, output_dim) self.relu = nn.ReLU() self.softmax = nn.Softmax(dim=1) def forward(self, input_0): primals_1 = self.d1.weight primals_2 = self.d1.bias primals_4 = self.d2.weight primals_5 = self.d2.bias primals_6 = self.d3.weight primals_7 = self.d3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
MichaelLee-ceo/FedSAUC
MLP
false
5,596
[ "Apache-2.0" ]
1
8c00008772213562ff6a07bf9fa92c3831713118
https://github.com/MichaelLee-ceo/FedSAUC/tree/8c00008772213562ff6a07bf9fa92c3831713118
import torch from torch import nn import torch.utils class Model(torch.nn.Module): def __init__(self, input_dim, output_dim): super().__init__() self.d1 = torch.nn.Linear(input_dim, 32) self.d2 = torch.nn.Linear(32, 16) self.d3 = torch.nn.Linear(16, output_dim) self.relu = nn.ReLU() self.softmax = nn.Softmax(dim=1) def forward(self, x): x = self.d1(x) x = self.relu(x) x = self.d2(x) x = self.relu(x) x = self.d3(x) x = self.softmax(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
CNN_DropOut
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/25/c25jycy7e5gprlbgtiyrrn7pxanfat7ddoyg4geoxxk5b3yr55vp.py # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x_2 => convolution # x_3 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 21632 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 676) % 8 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ac/cacw4wiptbynpfgqcbrkvsw3g5rwqpp5mybqq7c5biz7ktl56fjr.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_4 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 5408 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 13 x3 = (xindex // 13) x2 = (xindex // 1352) x4 = xindex % 1352 tmp0 = tl.load(in_ptr0 + ((2*x0) + (52*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (52*x3)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (26 + (2*x0) + (52*x3)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (27 + (2*x0) + (52*x3)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + (1376*x2)), tmp6, 
xmask) tl.store(out_ptr1 + (x4 + (1408*x2)), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/dg/cdgydpu4uv4skm66vxq4n3ixbpxejadpitghdy5bn7jlo5vnue25.py # Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x_5 => convolution_1 # x_6 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 7744 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 121) % 16 x2 = (xindex // 1936) x4 = xindex % 1936 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x4 + (1952*x2)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/tk/ctkacddt2xdaiu5gos5eoszgznxszehacgidrhwuf6hou6fyt3aj.py # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_7 => _low_memory_max_pool2d_with_offsets_1, getitem_3 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import 
triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = (xindex // 5) % 5 x2 = (xindex // 25) % 16 x3 = (xindex // 400) x4 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (22*x1) + (121*x2) + (1952*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (22*x1) + (121*x2) + (1952*x3)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (11 + (2*x0) + (22*x1) + (121*x2) + (1952*x3)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (12 + (2*x0) + (22*x1) + (121*x2) + (1952*x3)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x4), tmp15, xmask) tl.store(out_ptr1 + (x4), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/2y/c2y7nraxg57efqjgaab4g2ywiolmgkkaxdzjcqfjeb2guhagama5.py # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_11 => relu_2 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import 
AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 28, 28), (784, 28, 1)) assert_size_stride(primals_2, (8, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_3, (8, ), (1, )) assert_size_stride(primals_4, (16, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_5, (16, ), (1, )) assert_size_stride(primals_6, (64, 400), (400, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (10, 64), (64, 1)) assert_size_stride(primals_9, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 1, 28, 28), (784, 784, 28, 1), 0), primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 26, 26), (5408, 676, 26, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_3, 21632, grid=grid(21632), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 8, 13, 13), (1376, 169, 13, 1), torch.float32) buf3 = empty_strided_cuda((4, 8, 13, 13), (1408, 169, 13, 1), torch.int8) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 5408, grid=grid(5408), stream=stream0) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 11, 
11), (1936, 121, 11, 1)) buf5 = empty_strided_cuda((4, 16, 11, 11), (1952, 121, 11, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf4, primals_5, buf5, 7744, grid=grid(7744), stream=stream0) del buf4 del primals_5 buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8) buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 1600, grid=grid(1600), stream=stream0) buf8 = empty_strided_cuda((4, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 64), (1, 400), 0), out=buf8) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.relu] triton_poi_fused_relu_4.run(buf9, primals_7, 256, grid=grid(256), stream=stream0) del primals_7 buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8, (64, 10), (1, 64), 0), alpha=1, beta=1, out=buf10) del primals_9 return (buf10, primals_2, primals_4, reinterpret_tensor(primals_1, (4, 1, 28, 28), (784, 784, 28, 1), 0), buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, primals_8, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 28, 28), (784, 28, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((10, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.utils class CNN_DropOut(torch.nn.Module): """ Recommended model by "Adaptive Federated Optimization" (https://arxiv.org/pdf/2003.00295.pdf) Used for EMNIST experiments. When `only_digits=True`, the summary of returned model is ``` Model: _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= reshape (Reshape) (None, 28, 28, 1) 0 _________________________________________________________________ conv2d (Conv2D) (None, 26, 26, 32) 320 _________________________________________________________________ conv2d_1 (Conv2D) (None, 24, 24, 64) 18496 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 12, 12, 64) 0 _________________________________________________________________ dropout (Dropout) (None, 12, 12, 64) 0 _________________________________________________________________ flatten (Flatten) (None, 9216) 0 _________________________________________________________________ dense (Dense) (None, 128) 1179776 _________________________________________________________________ dropout_1 (Dropout) (None, 128) 0 _________________________________________________________________ dense_1 (Dense) (None, 10) 1290 ================================================================= Total params: 1,199,882 Trainable params: 1,199,882 Non-trainable params: 0 ``` Args: only_digits: If True, uses a final layer with 10 outputs, for use with the digits only MNIST dataset (http://yann.lecun.com/exdb/mnist/). If False, uses 62 outputs for Federated Extended MNIST (FEMNIST) EMNIST: Extending MNIST to handwritten letters: https://arxiv.org/abs/1702.05373. Returns: A `torch.nn.Module`. """ def __init__(self, only_digits=True): super(CNN_DropOut, self).__init__() self.conv2d_1 = torch.nn.Conv2d(1, 8, kernel_size=3) self.max_pooling = nn.MaxPool2d(2, stride=2) self.conv2d_2 = torch.nn.Conv2d(8, 16, kernel_size=3) self.dropout_1 = nn.Dropout(0.25) self.flatten = nn.Flatten() self.linear_1 = nn.Linear(16 * 5 * 5, 64) self.dropout_2 = nn.Dropout(0.25) self.linear_2 = nn.Linear(64, 10) self.relu = nn.ReLU() def forward(self, x): x = x.view(x.shape[0], 28, 28) x = torch.unsqueeze(x, 1) x = self.conv2d_1(x) x = self.relu(x) x = self.max_pooling(x) x = self.conv2d_2(x) x = self.relu(x) x = self.max_pooling(x) x = self.dropout_1(x) x = self.flatten(x) x = self.linear_1(x) x = self.relu(x) x = self.dropout_2(x) x = self.linear_2(x) return x def get_inputs(): return [torch.rand([4, 28, 28])] def get_init_inputs(): return [[], {}]
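The 16 * 5 * 5 input size of `linear_1` follows directly from the shape flow of this forward pass: a 3x3 valid convolution takes 28 -> 26, 2x2 pooling -> 13, a second 3x3 convolution -> 11, and pooling -> 5. A minimal sketch confirming this, reusing the `CNN_DropOut` class above (the batch size of 4 is taken from `get_inputs()`; dropout is omitted since it is a no-op in eval mode):

```python
import torch

# Shape walk-through up to the flatten point of CNN_DropOut.forward().
m = CNN_DropOut().eval()
x = torch.rand(4, 28, 28).view(4, 1, 28, 28)
x = m.max_pooling(m.relu(m.conv2d_1(x)))  # 28 -> 26 -> 13: (4, 8, 13, 13)
x = m.max_pooling(m.relu(m.conv2d_2(x)))  # 13 -> 11 -> 5:  (4, 16, 5, 5)
print(x.flatten(1).shape)  # torch.Size([4, 400]) == linear_1's in_features
```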
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 21632 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 676 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 5408 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 13 x3 = xindex // 13 x2 = xindex // 1352 x4 = xindex % 1352 tmp0 = tl.load(in_ptr0 + (2 * x0 + 52 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 52 * x3), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (26 + 2 * x0 + 52 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (27 + 2 * x0 + 52 * x3), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + 1376 * x2), tmp6, xmask) tl.store(out_ptr1 + (x4 + 1408 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 7744 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 121 % 16 x2 = xindex // 1936 x4 = xindex % 1936 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x4 + 1952 * x2), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 % 5 x2 = xindex // 25 % 16 x3 = xindex // 400 x4 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 22 * x1 + 121 * x2 + 1952 * x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 22 * x1 + 121 * x2 + 1952 * x3), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (11 + 2 * x0 + 22 * x1 + 121 * x2 + 1952 * x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (12 + 2 * x0 + 22 * x1 + 121 * x2 + 1952 * x3 ), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = 
tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x4, tmp15, xmask) tl.store(out_ptr1 + x4, tmp16, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 28, 28), (784, 28, 1)) assert_size_stride(primals_2, (8, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_3, (8,), (1,)) assert_size_stride(primals_4, (16, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (64, 400), (400, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (10, 64), (64, 1)) assert_size_stride(primals_9, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 1, 28, 28), (784, 784, 28, 1), 0), primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 26, 26), (5408, 676, 26, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(21632)](buf1, primals_3, 21632, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 8, 13, 13), (1376, 169, 13, 1), torch .float32) buf3 = empty_strided_cuda((4, 8, 13, 13), (1408, 169, 13, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_1[grid(5408)](buf1, buf2, buf3, 5408, XBLOCK=128, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 11, 11), (1936, 121, 11, 1)) buf5 = empty_strided_cuda((4, 16, 11, 11), (1952, 121, 11, 1), torch.float32) triton_poi_fused_convolution_relu_2[grid(7744)](buf4, primals_5, buf5, 7744, XBLOCK=256, num_warps=4, num_stages=1) del buf4 del primals_5 buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8) buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32 ) triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6, buf7, 1600, XBLOCK=256, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 64), (1, 400), 0), out=buf8) buf9 = buf8 del buf8 triton_poi_fused_relu_4[grid(256)](buf9, primals_7, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8, (64, 10), (1, 64), 0), alpha=1, beta=1, out=buf10) del primals_9 return buf10, 
primals_2, primals_4, reinterpret_tensor(primals_1, (4, 1, 28, 28), (784, 784, 28, 1), 0 ), buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, primals_8, primals_6 class CNN_DropOutNew(torch.nn.Module): """ Recommended model by "Adaptive Federated Optimization" (https://arxiv.org/pdf/2003.00295.pdf) Used for EMNIST experiments. When `only_digits=True`, the summary of returned model is ``` Model: _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= reshape (Reshape) (None, 28, 28, 1) 0 _________________________________________________________________ conv2d (Conv2D) (None, 26, 26, 32) 320 _________________________________________________________________ conv2d_1 (Conv2D) (None, 24, 24, 64) 18496 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 12, 12, 64) 0 _________________________________________________________________ dropout (Dropout) (None, 12, 12, 64) 0 _________________________________________________________________ flatten (Flatten) (None, 9216) 0 _________________________________________________________________ dense (Dense) (None, 128) 1179776 _________________________________________________________________ dropout_1 (Dropout) (None, 128) 0 _________________________________________________________________ dense_1 (Dense) (None, 10) 1290 ================================================================= Total params: 1,199,882 Trainable params: 1,199,882 Non-trainable params: 0 ``` Args: only_digits: If True, uses a final layer with 10 outputs, for use with the digits only MNIST dataset (http://yann.lecun.com/exdb/mnist/). If False, uses 62 outputs for Federated Extended MNIST (FEMNIST) EMNIST: Extending MNIST to handwritten letters: https://arxiv.org/abs/1702.05373. Returns: A `torch.nn.Module`. """ def __init__(self, only_digits=True): super(CNN_DropOutNew, self).__init__() self.conv2d_1 = torch.nn.Conv2d(1, 8, kernel_size=3) self.max_pooling = nn.MaxPool2d(2, stride=2) self.conv2d_2 = torch.nn.Conv2d(8, 16, kernel_size=3) self.dropout_1 = nn.Dropout(0.25) self.flatten = nn.Flatten() self.linear_1 = nn.Linear(16 * 5 * 5, 64) self.dropout_2 = nn.Dropout(0.25) self.linear_2 = nn.Linear(64, 10) self.relu = nn.ReLU() def forward(self, input_0): primals_2 = self.conv2d_1.weight primals_3 = self.conv2d_1.bias primals_4 = self.conv2d_2.weight primals_5 = self.conv2d_2.bias primals_6 = self.linear_1.weight primals_7 = self.linear_1.bias primals_8 = self.linear_2.weight primals_9 = self.linear_2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
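One detail worth noting in the two `max_pool2d_with_indices` kernels above: alongside the pooled values they write an int8 code in {0..3} identifying which position of each 2x2 window won, the compact form Inductor keeps for the backward pass. Eager PyTorch exposes the same bookkeeping as flat int64 indices into the input plane. A sketch with illustrative tensors:

```python
import torch
import torch.nn.functional as F

# Eager counterpart of the fused pooling kernels: values plus argmax indices.
x = torch.rand(1, 1, 4, 4)
vals, idx = F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)
print(vals.shape, idx.dtype)  # torch.Size([1, 1, 2, 2]) torch.int64
```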
MichaelLee-ceo/FedSAUC
CNN_DropOut
false
5,597
[ "Apache-2.0" ]
1
8c00008772213562ff6a07bf9fa92c3831713118
https://github.com/MichaelLee-ceo/FedSAUC/tree/8c00008772213562ff6a07bf9fa92c3831713118
import torch from torch import nn import torch.utils class Model(torch.nn.Module): """ Recommended model by "Adaptive Federated Optimization" (https://arxiv.org/pdf/2003.00295.pdf) Used for EMNIST experiments. When `only_digits=True`, the summary of returned model is ``` Model: _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= reshape (Reshape) (None, 28, 28, 1) 0 _________________________________________________________________ conv2d (Conv2D) (None, 26, 26, 32) 320 _________________________________________________________________ conv2d_1 (Conv2D) (None, 24, 24, 64) 18496 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 12, 12, 64) 0 _________________________________________________________________ dropout (Dropout) (None, 12, 12, 64) 0 _________________________________________________________________ flatten (Flatten) (None, 9216) 0 _________________________________________________________________ dense (Dense) (None, 128) 1179776 _________________________________________________________________ dropout_1 (Dropout) (None, 128) 0 _________________________________________________________________ dense_1 (Dense) (None, 10) 1290 ================================================================= Total params: 1,199,882 Trainable params: 1,199,882 Non-trainable params: 0 ``` Args: only_digits: If True, uses a final layer with 10 outputs, for use with the digits only MNIST dataset (http://yann.lecun.com/exdb/mnist/). If False, uses 62 outputs for Federated Extended MNIST (FEMNIST) EMNIST: Extending MNIST to handwritten letters: https://arxiv.org/abs/1702.05373. Returns: A `torch.nn.Module`. """ def __init__(self, only_digits=True): super().__init__() self.conv2d_1 = torch.nn.Conv2d(1, 8, kernel_size=3) self.max_pooling = nn.MaxPool2d(2, stride=2) self.conv2d_2 = torch.nn.Conv2d(8, 16, kernel_size=3) self.dropout_1 = nn.Dropout(0.25) self.flatten = nn.Flatten() self.linear_1 = nn.Linear(16 * 5 * 5, 64) self.dropout_2 = nn.Dropout(0.25) self.linear_2 = nn.Linear(64, 10) self.relu = nn.ReLU() def forward(self, x): x = x.view(x.shape[0], 28, 28) x = torch.unsqueeze(x, 1) x = self.conv2d_1(x) x = self.relu(x) x = self.max_pooling(x) x = self.conv2d_2(x) x = self.relu(x) x = self.max_pooling(x) x = self.dropout_1(x) x = self.flatten(x) x = self.linear_1(x) x = self.relu(x) x = self.dropout_2(x) x = self.linear_2(x) return x def get_inputs(): return [torch.rand([4, 28, 28])] def get_init_inputs(): return []
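Since `CNN_DropOutNew` above keeps the same submodule names as this eager `Model`, the two can share a state dict, which gives a quick way to sanity-check the lowered `call` path against eager execution. A sketch under the assumption of a CUDA device; the tolerance is illustrative:

```python
import torch

torch.manual_seed(0)
eager = Model().cuda().eval()                # eval() disables both dropouts
lowered = CNN_DropOutNew().cuda().eval()
lowered.load_state_dict(eager.state_dict())  # identical submodule names

x = torch.rand(4, 28, 28, device='cuda')
with torch.no_grad():
    print(torch.allclose(eager(x), lowered(x), atol=1e-5))  # expect True
```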
ContrastiveLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/lh/clhtaboxxs526aw4bqcb7s6xoig5vzwco55tfg6waaga3ao3elgd.py # Topologically Sorted Source Nodes: [euclidean_distance], Original ATen: [aten.sub, aten.add, aten.norm] # Source node to ATen node mapping: # euclidean_distance => add, pow_1, pow_2, sub, sum_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Scalar](args = (%sub, 1e-06), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2.0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [3]), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) triton_poi_fused_add_norm_sub_0 = async_compile.triton('triton_poi_fused_add_norm_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_norm_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': 
False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = 1e-06 tmp4 = tmp2 + tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 + tmp3 tmp10 = tmp9 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp12 - tmp13 tmp15 = tmp14 + tmp3 tmp16 = tmp15 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 + tmp3 tmp22 = tmp21 * tmp21 tmp23 = tmp17 + tmp22 tmp24 = libdevice.sqrt(tmp23) tl.store(out_ptr0 + (x0), tmp24, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/2l/c2l2efhstxzxtce6u6qjwkvh7vawevxafq4szecegv75l7dernch.py # Topologically Sorted Source Nodes: [sub, pow_1, mul, sub_1, clamp, pow_2, mul_1, add, loss_contrastive], Original ATen: [aten.rsub, aten.pow, aten.mul, aten.clamp, aten.add, aten.mean] # Source node to ATen node mapping: # add => add_1 # clamp => clamp_min # loss_contrastive => mean # mul => mul # mul_1 => mul_1 # pow_1 => pow_3 # pow_2 => pow_4 # sub => sub_1 # sub_1 => sub_2 # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg2_1), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_2, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %pow_3), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (2.0, %pow_2), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%clamp_min, 2), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, %pow_4), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_1,), kwargs = {}) triton_per_fused_add_clamp_mean_mul_pow_rsub_1 = async_compile.triton('triton_per_fused_add_clamp_mean_mul_pow_rsub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 
'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_mean_mul_pow_rsub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_clamp_mean_mul_pow_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r2 = rindex r0 = rindex % 64 tmp0 = tl.load(in_ptr0 + (r2), None) tmp3 = tl.load(in_ptr1 + (r0), None, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp3 * tmp3 tmp5 = tmp2 * tmp4 tmp6 = 2.0 tmp7 = tmp6 - tmp3 tmp8 = 0.0 tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp10 = tmp9 * tmp9 tmp11 = tmp0 * tmp10 tmp12 = tmp5 + tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp17, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [euclidean_distance], Original ATen: [aten.sub, aten.add, aten.norm] stream0 = get_raw_stream(0) triton_poi_fused_add_norm_sub_0.run(arg1_1, arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [sub, pow_1, mul, sub_1, clamp, pow_2, mul_1, add, loss_contrastive], Original ATen: [aten.rsub, aten.pow, aten.mul, aten.clamp, aten.add, aten.mean] triton_per_fused_add_clamp_mean_mul_pow_rsub_1.run(buf2, arg2_1, buf0, 1, 256, grid=grid(1), stream=stream0) del arg2_1 del buf0 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark 
import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data import torch.nn.functional as F import torch.nn.parallel class ContrastiveLoss(torch.nn.Module): """ Contrastive loss function. Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf """ def __init__(self, margin=2.0): super(ContrastiveLoss, self).__init__() self.margin = margin def forward(self, output1, output2, is_negative): euclidean_distance = F.pairwise_distance(output1, output2) loss_contrastive = torch.mean((1 - is_negative) * torch.pow( euclidean_distance, 2) + is_negative * torch.pow(torch.clamp( self.margin - euclidean_distance, min=0.0), 2)) return loss_contrastive def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tmp3 = 1e-06 tmp4 = tmp2 + tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 + tmp3 tmp10 = tmp9 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp12 - tmp13 tmp15 = tmp14 + tmp3 tmp16 = tmp15 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 + tmp3 tmp22 = tmp21 * tmp21 tmp23 = tmp17 + tmp22 tmp24 = libdevice.sqrt(tmp23) tl.store(out_ptr0 + x0, tmp24, xmask) @triton.jit def triton_per_fused_add_clamp_mean_mul_pow_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex r0 = rindex % 64 tmp0 = tl.load(in_ptr0 + r2, None) tmp3 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp3 * tmp3 tmp5 = tmp2 * tmp4 tmp6 = 2.0 tmp7 = tmp6 - tmp3 tmp8 = 0.0 tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp10 = tmp9 * tmp9 tmp11 = tmp0 * tmp10 tmp12 = tmp5 + tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_norm_sub_0[grid(64)](arg1_1, arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused_add_clamp_mean_mul_pow_rsub_1[grid(1)](buf2, arg2_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg2_1 del buf0 return buf2, class ContrastiveLossNew(torch.nn.Module): """ Contrastive loss function. 
Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf """ def __init__(self, margin=2.0): super(ContrastiveLossNew, self).__init__() self.margin = margin def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
MinesNicaicai/large-scale-pointcloud-matching
ContrastiveLoss
false
5,598
[ "MIT" ]
1
cfe140f2be1110ed75b6edd27538021e513a31c9
https://github.com/MinesNicaicai/large-scale-pointcloud-matching/tree/cfe140f2be1110ed75b6edd27538021e513a31c9
import torch import torch.utils.data import torch.nn.functional as F import torch.nn.parallel class Model(torch.nn.Module): """ Contrastive loss function. Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf """ def __init__(self, margin=2.0): super().__init__() self.margin = margin def forward(self, output1, output2, is_negative): euclidean_distance = F.pairwise_distance(output1, output2) loss_contrastive = torch.mean((1 - is_negative) * torch.pow( euclidean_distance, 2) + is_negative * torch.pow(torch.clamp( self.margin - euclidean_distance, min=0.0), 2)) return loss_contrastive def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return []
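The two fused kernels in this entry split the computation exactly as the eager code does: the first reduces the last dimension into a Euclidean distance (with `F.pairwise_distance`'s eps of 1e-6 folded into the difference before squaring), the second applies the Hadsell-Chopra-LeCun objective and reduces to a mean. A sketch of the same math in plain PyTorch; tensors are illustrative, and the (4, 4, 4) distance broadcasts against `is_negative` just as `r0 = rindex % 64` does in the reduction kernel:

```python
import torch

torch.manual_seed(0)
out1, out2 = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
is_negative = torch.rand(4, 4, 4, 4)
margin = 2.0

# Kernel 0: per-row distance; the graph subtracts out1 from out2 and adds eps.
d = torch.sqrt(((out2 - out1 + 1e-6) ** 2).sum(dim=-1))  # (4, 4, 4)

# Kernel 1: (1 - y) * d^2 + y * clamp(margin - d, 0)^2, reduced to a mean.
loss = torch.mean((1 - is_negative) * d.pow(2)
                  + is_negative * torch.clamp(margin - d, min=0.0).pow(2))
print(float(loss), float(Model()(out1, out2, is_negative)))  # agree to ~1e-6
```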
Attention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/nu/cnuc7ivckuuly7yn2763pwt3sw72jd6vuwpeeu4sfespm5iz7fq4.py # Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax] # Source node to ATen node mapping: # p_attn => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/fj/cfjl47pvhwbpfbvh6rfehwy5ijxc5p3zgkld2lwf3mw5bl6pbkak.py # Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax] # Source node to ATen node mapping: # p_attn => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = 
tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0) buf3 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf3) del arg2_1 return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn as nn import torch.nn.functional as F class Attention(nn.Module): def forward(self, query, key, value, mask=None, dropout=None): """Compute 'Scaled Dot Product Attention'""" d_k = query.size(-1) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k) if mask is not None: scores = scores.masked_fill(mask == 0, -1000000000.0) p_attn = F.softmax(scores, dim=-1) if dropout is not None: p_attn = dropout(p_attn) return torch.matmul(p_attn, value), p_attn def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf3 ) del arg2_1 return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2 class AttentionNew(nn.Module): def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1]
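A sketch of driving the lowered entry point directly rather than through `AttentionNew`: the `assert_size_stride` guards at the top of `call` pin the exact shapes and strides from `get_inputs()`, and `call` empties the argument list it receives. This assumes a CUDA device:

```python
import torch

# Direct use of call(); both outputs (context and attention map) come back.
q, k, v = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
out, p_attn = call([q, k, v])   # the list is cleared inside call()
print(out.shape, p_attn.shape)  # both torch.Size([4, 4, 4, 4])
```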
Moymix/BERT-pytorch
Attention
false
5,599
[ "Apache-2.0" ]
1
f0b9c3ae53e05c00adcc761e0422e4222d8b5619
https://github.com/Moymix/BERT-pytorch/tree/f0b9c3ae53e05c00adcc761e0422e4222d8b5619
import math import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def forward(self, query, key, value, mask=None, dropout=None): """Compute 'Scaled Dot Product Attention'""" d_k = query.size(-1) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k) if mask is not None: scores = scores.masked_fill(mask == 0, -1000000000.0) p_attn = F.softmax(scores, dim=-1) if dropout is not None: p_attn = dropout(p_attn) return torch.matmul(p_attn, value), p_attn def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return []
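The two `_softmax` kernels in this entry implement the numerically stable softmax in two passes: the first exponentiates row-max-shifted scores, applying the 1/sqrt(d_k) = 0.5 scale after the shift (algebraically identical to scaling first, since the max shifts by the same constant), and the second normalizes by the row sum. A sketch of the same decomposition in plain PyTorch, with `amax`/`sum` standing in for the unrolled 4-element reductions; tensors are illustrative:

```python
import math
import torch

q, k, v = (torch.rand(4, 4, 4, 4) for _ in range(3))
scores = torch.matmul(q, k.transpose(-2, -1))                  # first bmm

shifted = (scores - scores.amax(dim=-1, keepdim=True)) / math.sqrt(q.size(-1))
p_attn = shifted.exp()                                         # kernel 0
p_attn = p_attn / p_attn.sum(dim=-1, keepdim=True)             # kernel 1

out = torch.matmul(p_attn, v)                                  # second bmm
ref_out, ref_attn = Model()(q, k, v)
print(torch.allclose(out, ref_out), torch.allclose(p_attn, ref_attn))
```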
VDSR_F64B6
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/jj/cjjr5irxfotmh4xreee5lxonmkib4qscmbuzwp2xvaafsncwvu3q.py # Topologically Sorted Source Nodes: [y], Original ATen: [aten.relu] # Source node to ATen node mapping: # y => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + (x0), tmp2, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def 
call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_3, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (1, 64, 3, 3), (576, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [y], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf3, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf5, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [y_3], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf7, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [y_4], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf9, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf11 = buf10; del 
buf10 # reuse # Topologically Sorted Source Nodes: [y_5], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf11, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [y_6], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf13, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [y_7], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf13, primals_9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 1, 64, 64), (4096, 4096, 64, 1)) return (buf14, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, buf1, buf3, buf5, buf7, buf9, buf11, buf13, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


def load_param(model1_path, model2):
    dict_param1 = torch.load(model1_path)
    dict_param2 = dict(model2.named_parameters())
    for name2 in dict_param2:
        if name2 in dict_param1:
            dict_param2[name2].data.copy_(dict_param1[name2].data)
    model2.load_state_dict(dict_param2)
    return model2


class VDSR_F64B6(nn.Module):

    def __init__(self, model=False, fixed=False):
        super(VDSR_F64B6, self).__init__()
        self.fixed = fixed
        self.conv1 = nn.Conv2d(1, 64, 3, 1, 1, bias=False)
        self.conv2 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
        self.conv3 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
        self.conv4 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
        self.conv5 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
        self.conv6 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
        self.conv7 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
        self.conv8 = nn.Conv2d(64, 1, 3, 1, 1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        if model:
            load_param(model, self)
        if fixed:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, y):
        y = self.relu(self.conv1(y))
        y = self.relu(self.conv2(y))
        y = self.relu(self.conv3(y))
        y = self.relu(self.conv4(y))
        y = self.relu(self.conv5(y))
        y = self.relu(self.conv6(y))
        y = self.relu(self.conv7(y))
        y = self.conv8(y)
        return y

    def forward_stem(self, y):
        y = self.relu(self.conv1(y))
        out1 = y
        y = self.relu(self.conv2(y))
        out2 = y
        y = self.relu(self.conv3(y))
        out3 = y
        y = self.relu(self.conv4(y))
        out4 = y
        y = self.relu(self.conv5(y))
        out5 = y
        y = self.relu(self.conv6(y))
        out6 = y
        y = self.relu(self.conv7(y))
        out7 = y
        y = self.conv8(y)
        out8 = y
        return out1, out2, out3, out4, out5, out6, out7, out8

    def forward_dense(self, y):
        y = self.relu(self.conv1(y))
        out1 = y
        y = self.relu(self.conv2(y))
        out2 = y
        y = self.relu(self.conv3(y))
        out3 = y
        y = self.relu(self.conv4(y))
        out4 = y
        y = self.relu(self.conv5(y))
        out5 = y
        y = self.relu(self.conv6(y))
        out6 = y
        y = self.relu(self.conv7(y))
        out7 = y
        y = self.conv8(y)
        out8 = y
        return out1, out2, out3, out4, out5, out6, out7, out8


def get_inputs():
    return [torch.rand([4, 1, 64, 64])]


def get_init_inputs():
    return [[], {}]
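The load_param helper above copies only the parameters whose names match between a saved checkpoint and the target model, then reloads the merged dict, so unmatched keys on either side are silently skipped. A minimal round-trip sketch (the donor/target modules and temp path are illustrative, not from the source; uses load_param as defined above):

import os
import tempfile

import torch
import torch.nn as nn

donor = nn.Conv2d(1, 64, 3, 1, 1, bias=False)
target = nn.Conv2d(1, 64, 3, 1, 1, bias=False)
path = os.path.join(tempfile.mkdtemp(), 'donor.pth')
torch.save(dict(donor.named_parameters()), path)  # checkpoint keyed by parameter name
load_param(path, target)  # copies 'weight'; any unmatched names would be skipped
torch.testing.assert_close(target.weight, donor.weight)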
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_3, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (1, 64, 3, 3), (576, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(1048576)](buf1, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_relu_0[grid(1048576)](buf3, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf5 = buf4 del buf4 triton_poi_fused_relu_0[grid(1048576)](buf5, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf7 = buf6 del buf6 triton_poi_fused_relu_0[grid(1048576)](buf7, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf9 = buf8 del buf8 triton_poi_fused_relu_0[grid(1048576)](buf9, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf10 = extern_kernels.convolution(buf9, primals_7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf11 = buf10 del buf10 triton_poi_fused_relu_0[grid(1048576)](buf11, 1048576, XBLOCK=512, num_warps=8, 
num_stages=1) buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf13 = buf12 del buf12 triton_poi_fused_relu_0[grid(1048576)](buf13, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf14 = extern_kernels.convolution(buf13, primals_9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 1, 64, 64), (4096, 4096, 64, 1)) return (buf14, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, buf1, buf3, buf5, buf7, buf9, buf11, buf13) def load_param(model1_path, model2): dict_param1 = torch.load(model1_path) dict_param2 = dict(model2.named_parameters()) for name2 in dict_param2: if name2 in dict_param1: dict_param2[name2].data.copy_(dict_param1[name2].data) model2.load_state_dict(dict_param2) return model2 class VDSR_F64B6New(nn.Module): def __init__(self, model=False, fixed=False): super(VDSR_F64B6New, self).__init__() self.fixed = fixed self.conv1 = nn.Conv2d(1, 64, 3, 1, 1, bias=False) self.conv2 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv3 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv4 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv5 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv6 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv7 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv8 = nn.Conv2d(64, 1, 3, 1, 1, bias=False) self.relu = nn.ReLU(inplace=True) if model: load_param(model, self) if fixed: for param in self.parameters(): param.requires_grad = False def forward_stem(self, y): y = self.relu(self.conv1(y)) out1 = y y = self.relu(self.conv2(y)) out2 = y y = self.relu(self.conv3(y)) out3 = y y = self.relu(self.conv4(y)) out4 = y y = self.relu(self.conv5(y)) out5 = y y = self.relu(self.conv6(y)) out6 = y y = self.relu(self.conv7(y)) out7 = y y = self.conv8(y) out8 = y return out1, out2, out3, out4, out5, out6, out7, out8 def forward_dense(self, y): y = self.relu(self.conv1(y)) out1 = y y = self.relu(self.conv2(y)) out2 = y y = self.relu(self.conv3(y)) out3 = y y = self.relu(self.conv4(y)) out4 = y y = self.relu(self.conv5(y)) out5 = y y = self.relu(self.conv6(y)) out6 = y y = self.relu(self.conv7(y)) out7 = y y = self.conv8(y) out8 = y return out1, out2, out3, out4, out5, out6, out7, out8 def forward(self, input_0): primals_1 = self.conv1.weight primals_3 = self.conv2.weight primals_4 = self.conv3.weight primals_5 = self.conv4.weight primals_6 = self.conv5.weight primals_7 = self.conv6.weight primals_8 = self.conv7.weight primals_9 = self.conv8.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
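The wrapper above is what torch.compile's inductor backend emits for the eager forward: cuDNN convolutions through extern_kernels.convolution, stitched together with the fused in-place ReLU kernel. A minimal sketch checking that the compiled path matches eager execution to float32 tolerance (assumes a CUDA device; the two-layer TinyVDSR stand-in is illustrative, not the real eight-layer model):

import torch
import torch.nn as nn

class TinyVDSR(nn.Module):
    """Hypothetical two-layer stand-in with the same conv/ReLU pattern."""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 64, 3, 1, 1, bias=False)
        self.conv2 = nn.Conv2d(64, 1, 3, 1, 1, bias=False)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, y):
        return self.conv2(self.relu(self.conv1(y)))

if torch.cuda.is_available():
    m = TinyVDSR().cuda().eval()
    x = torch.rand(4, 1, 64, 64, device='cuda')
    with torch.no_grad():
        ref = m(x)
        out = torch.compile(m, backend='inductor')(x)
    torch.testing.assert_close(out, ref, rtol=1e-4, atol=1e-4)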
MingSun-Tse/pytorch-vdsr
VDSR_F64B6
false
5600
[ "MIT" ]
1
597bacb4ec7385c8cc6cdf91e26e64ef2e6808b7
https://github.com/MingSun-Tse/pytorch-vdsr/tree/597bacb4ec7385c8cc6cdf91e26e64ef2e6808b7
import torch
import torch.nn as nn


def load_param(model1_path, model2):
    dict_param1 = torch.load(model1_path)
    dict_param2 = dict(model2.named_parameters())
    for name2 in dict_param2:
        if name2 in dict_param1:
            dict_param2[name2].data.copy_(dict_param1[name2].data)
    model2.load_state_dict(dict_param2)
    return model2


class Model(nn.Module):

    def __init__(self, model=False, fixed=False):
        super().__init__()
        self.fixed = fixed
        self.conv1 = nn.Conv2d(1, 64, 3, 1, 1, bias=False)
        self.conv2 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
        self.conv3 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
        self.conv4 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
        self.conv5 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
        self.conv6 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
        self.conv7 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
        self.conv8 = nn.Conv2d(64, 1, 3, 1, 1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        if model:
            load_param(model, self)
        if fixed:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, y):
        y = self.relu(self.conv1(y))
        y = self.relu(self.conv2(y))
        y = self.relu(self.conv3(y))
        y = self.relu(self.conv4(y))
        y = self.relu(self.conv5(y))
        y = self.relu(self.conv6(y))
        y = self.relu(self.conv7(y))
        y = self.conv8(y)
        return y

    def forward_stem(self, y):
        y = self.relu(self.conv1(y))
        out1 = y
        y = self.relu(self.conv2(y))
        out2 = y
        y = self.relu(self.conv3(y))
        out3 = y
        y = self.relu(self.conv4(y))
        out4 = y
        y = self.relu(self.conv5(y))
        out5 = y
        y = self.relu(self.conv6(y))
        out6 = y
        y = self.relu(self.conv7(y))
        out7 = y
        y = self.conv8(y)
        out8 = y
        return out1, out2, out3, out4, out5, out6, out7, out8

    def forward_dense(self, y):
        y = self.relu(self.conv1(y))
        out1 = y
        y = self.relu(self.conv2(y))
        out2 = y
        y = self.relu(self.conv3(y))
        out3 = y
        y = self.relu(self.conv4(y))
        out4 = y
        y = self.relu(self.conv5(y))
        out5 = y
        y = self.relu(self.conv6(y))
        out6 = y
        y = self.relu(self.conv7(y))
        out7 = y
        y = self.conv8(y)
        out8 = y
        return out1, out2, out3, out4, out5, out6, out7, out8


def get_inputs():
    return [torch.rand([4, 1, 64, 64])]


def get_init_inputs():
    return []
SmallVDSR_16x
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/xt/cxtkxbuil7mp4w2vi2sujbaxa6dkeig5rsoswqo7mepdr66ra5cf.py # Topologically Sorted Source Nodes: [y], Original ATen: [aten.relu] # Source node to ATen node mapping: # y => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + (x0), tmp2, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def 
call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21 = args args.clear() assert_size_stride(primals_1, (16, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_3, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_5, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_6, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_7, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_8, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_9, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_10, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_11, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_12, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_13, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_14, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_15, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_16, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_17, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_18, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_19, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_20, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_21, (1, 16, 3, 3), (144, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [y], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf3, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf5, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [y_3], Original ATen: [aten.relu] 
triton_poi_fused_relu_0.run(buf7, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [y_4], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf9, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [y_5], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf11, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [y_6], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf13, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf13, primals_9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf15 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [y_7], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf15, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution] buf16 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf17 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [y_8], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf17, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution] buf18 = extern_kernels.convolution(buf17, primals_11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf19 = buf18; del buf18 # reuse # Topologically Sorted Source Nodes: [y_9], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf19, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution] buf20 = extern_kernels.convolution(buf19, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf21 = buf20; del buf20 # reuse # Topologically Sorted Source Nodes: [y_10], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf21, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source 
Nodes: [conv2d_11], Original ATen: [aten.convolution] buf22 = extern_kernels.convolution(buf21, primals_13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf23 = buf22; del buf22 # reuse # Topologically Sorted Source Nodes: [y_11], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf23, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution] buf24 = extern_kernels.convolution(buf23, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf25 = buf24; del buf24 # reuse # Topologically Sorted Source Nodes: [y_12], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf25, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution] buf26 = extern_kernels.convolution(buf25, primals_15, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf27 = buf26; del buf26 # reuse # Topologically Sorted Source Nodes: [y_13], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf27, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution] buf28 = extern_kernels.convolution(buf27, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf29 = buf28; del buf28 # reuse # Topologically Sorted Source Nodes: [y_14], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf29, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_15], Original ATen: [aten.convolution] buf30 = extern_kernels.convolution(buf29, primals_17, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf31 = buf30; del buf30 # reuse # Topologically Sorted Source Nodes: [y_15], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf31, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_16], Original ATen: [aten.convolution] buf32 = extern_kernels.convolution(buf31, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf33 = buf32; del buf32 # reuse # Topologically Sorted Source Nodes: [y_16], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf33, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_17], Original ATen: [aten.convolution] buf34 = extern_kernels.convolution(buf33, primals_19, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf35 = buf34; del buf34 # reuse # Topologically Sorted Source Nodes: [y_17], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf35, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_18], Original ATen: [aten.convolution] buf36 = 
extern_kernels.convolution(buf35, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf37 = buf36; del buf36 # reuse # Topologically Sorted Source Nodes: [y_18], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf37, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [y_19], Original ATen: [aten.convolution] buf38 = extern_kernels.convolution(buf37, primals_21, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 1, 64, 64), (4096, 4096, 64, 1)) return (buf38, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, buf1, buf3, buf5, buf7, buf9, buf11, buf13, buf15, buf17, buf19, buf21, buf23, buf25, buf27, buf29, buf31, buf33, buf35, buf37, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((1, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21]) return print_performance(fn, 
times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
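benchmark_compiled_module above drives call() through torch._inductor's print_performance on random strided inputs. The same measurement can be reproduced with plain CUDA events; a generic sketch (the helper name time_cuda_ms is illustrative), usable with the fn lambda above:

import torch

def time_cuda_ms(fn, iters=50, warmup=10):
    """Average milliseconds per call of fn() on the current CUDA stream."""
    for _ in range(warmup):
        fn()
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    for _ in range(iters):
        fn()
    end.record()
    torch.cuda.synchronize()  # wait so elapsed_time sees both recorded events
    return start.elapsed_time(end) / iters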
import torch
import torch.nn as nn


def load_param(model1_path, model2):
    dict_param1 = torch.load(model1_path)
    dict_param2 = dict(model2.named_parameters())
    for name2 in dict_param2:
        if name2 in dict_param1:
            dict_param2[name2].data.copy_(dict_param1[name2].data)
    model2.load_state_dict(dict_param2)
    return model2


class SmallVDSR_16x(nn.Module):

    def __init__(self, model=False, fixed=False):
        super(SmallVDSR_16x, self).__init__()
        self.fixed = fixed
        self.conv1 = nn.Conv2d(1, 16, 3, 1, 1, bias=False)
        self.conv2 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
        self.conv3 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
        self.conv4 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
        self.conv5 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
        self.conv6 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
        self.conv7 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
        self.conv8 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
        self.conv9 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
        self.conv10 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
        self.conv11 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
        self.conv12 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
        self.conv13 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
        self.conv14 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
        self.conv15 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
        self.conv16 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
        self.conv17 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
        self.conv18 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
        self.conv19 = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
        self.conv20 = nn.Conv2d(16, 1, 3, 1, 1, bias=False)
        self.relu = nn.ReLU()
        if model:
            load_param(model, self)
        if fixed:
            for param in self.parameters():
                param.requires_grad = False

    def forward_stem(self, y):
        y = self.relu(self.conv1(y))
        out1 = y
        y = self.relu(self.conv2(y))
        out2 = y
        y = self.relu(self.conv3(y))
        out3 = y
        y = self.relu(self.conv4(y))
        out4 = y
        y = self.relu(self.conv5(y))
        out5 = y
        y = self.relu(self.conv6(y))
        out6 = y
        y = self.relu(self.conv7(y))
        out7 = y
        y = self.relu(self.conv8(y))
        out8 = y
        y = self.relu(self.conv9(y))
        out9 = y
        y = self.relu(self.conv10(y))
        out10 = y
        y = self.relu(self.conv11(y))
        out11 = y
        y = self.relu(self.conv12(y))
        out12 = y
        y = self.relu(self.conv13(y))
        out13 = y
        y = self.relu(self.conv14(y))
        out14 = y
        y = self.relu(self.conv15(y))
        out15 = y
        y = self.relu(self.conv16(y))
        out16 = y
        y = self.relu(self.conv17(y))
        out17 = y
        y = self.relu(self.conv18(y))
        out18 = y
        y = self.relu(self.conv19(y))
        out19 = y
        y = self.conv20(y)
        return (out1, out2, out3, out4, out5, out6, out7, out8, out9,
            out10, out11, out12, out13, out14, out15, out16, out17, out18,
            out19, y)

    def forward_dense(self, y):
        y = self.relu(self.conv1(y))
        out1 = y
        y = self.relu(self.conv2(y))
        out2 = y
        y = self.relu(self.conv3(y))
        out3 = y
        y = self.relu(self.conv4(y))
        out4 = y
        y = self.relu(self.conv5(y))
        out5 = y
        y = self.relu(self.conv6(y))
        out6 = y
        y = self.relu(self.conv7(y))
        out7 = y
        y = self.relu(self.conv8(y))
        out8 = y
        y = self.relu(self.conv9(y))
        out9 = y
        y = self.relu(self.conv10(y))
        out10 = y
        y = self.relu(self.conv11(y))
        out11 = y
        y = self.relu(self.conv12(y))
        out12 = y
        y = self.relu(self.conv13(y))
        out13 = y
        y = self.relu(self.conv14(y))
        out14 = y
        y = self.relu(self.conv15(y))
        out15 = y
        y = self.relu(self.conv16(y))
        out16 = y
        y = self.relu(self.conv17(y))
        out17 = y
        y = self.relu(self.conv18(y))
        out18 = y
        y = self.relu(self.conv19(y))
        out19 = y
        y = self.conv20(y)
        out20 = y
        return (out1, out2, out3, out4, out5, out6, out7, out8, out9,
            out10, out11, out12, out13, out14, out15, out16, out17, out18,
            out19, out20)

    def forward(self, y):
        y = self.relu(self.conv1(y))
        y = self.relu(self.conv2(y))
        y = self.relu(self.conv3(y))
        y = self.relu(self.conv4(y))
        y = self.relu(self.conv5(y))
        y = self.relu(self.conv6(y))
        y = self.relu(self.conv7(y))
        y = self.relu(self.conv8(y))
        y = self.relu(self.conv9(y))
        y = self.relu(self.conv10(y))
        y = self.relu(self.conv11(y))
        y = self.relu(self.conv12(y))
        y = self.relu(self.conv13(y))
        y = self.relu(self.conv14(y))
        y = self.relu(self.conv15(y))
        y = self.relu(self.conv16(y))
        y = self.relu(self.conv17(y))
        y = self.relu(self.conv18(y))
        y = self.relu(self.conv19(y))
        y = self.conv20(y)
        return y


def get_inputs():
    return [torch.rand([4, 1, 64, 64])]


def get_init_inputs():
    return [[], {}]
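Relative to the eight-layer, 64-channel VDSR_F64B6 earlier in this file, SmallVDSR_16x narrows to 16 channels but deepens to 20 layers. A quick check of the bias-free 3x3 parameter counts, derived from the constructors above (a sketch):

def conv_params(cin, cout, k=3):
    return cin * cout * k * k

vdsr_f64b6 = conv_params(1, 64) + 6 * conv_params(64, 64) + conv_params(64, 1)
small_16x = conv_params(1, 16) + 18 * conv_params(16, 16) + conv_params(16, 1)
print(vdsr_f64b6, small_16x)  # 222336 41760, roughly a 5.3x reduction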
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21) = args args.clear() assert_size_stride(primals_1, (16, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_3, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_5, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_6, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_7, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_8, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_9, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_10, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_11, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_12, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_13, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_14, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_15, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_16, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_17, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_18, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_19, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_20, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_21, (1, 16, 3, 3), (144, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(262144)](buf1, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_relu_0[grid(262144)](buf3, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf5 = buf4 del buf4 triton_poi_fused_relu_0[grid(262144)](buf5, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf7 = buf6 del buf6 triton_poi_fused_relu_0[grid(262144)](buf7, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf9 = buf8 del buf8 triton_poi_fused_relu_0[grid(262144)](buf9, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf10 = extern_kernels.convolution(buf9, primals_7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf11 = buf10 del buf10 triton_poi_fused_relu_0[grid(262144)](buf11, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf13 = buf12 del buf12 triton_poi_fused_relu_0[grid(262144)](buf13, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf14 = extern_kernels.convolution(buf13, primals_9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf15 = buf14 del buf14 triton_poi_fused_relu_0[grid(262144)](buf15, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf16 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf17 = buf16 del buf16 triton_poi_fused_relu_0[grid(262144)](buf17, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf18 = extern_kernels.convolution(buf17, primals_11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf19 = buf18 del buf18 triton_poi_fused_relu_0[grid(262144)](buf19, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf20 = extern_kernels.convolution(buf19, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf21 = buf20 del buf20 triton_poi_fused_relu_0[grid(262144)](buf21, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf22 = extern_kernels.convolution(buf21, primals_13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf23 = buf22 del buf22 triton_poi_fused_relu_0[grid(262144)](buf23, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf24 = extern_kernels.convolution(buf23, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf25 = buf24 del buf24 triton_poi_fused_relu_0[grid(262144)](buf25, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf26 = extern_kernels.convolution(buf25, primals_15, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf27 = 
buf26 del buf26 triton_poi_fused_relu_0[grid(262144)](buf27, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf28 = extern_kernels.convolution(buf27, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf29 = buf28 del buf28 triton_poi_fused_relu_0[grid(262144)](buf29, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf30 = extern_kernels.convolution(buf29, primals_17, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf31 = buf30 del buf30 triton_poi_fused_relu_0[grid(262144)](buf31, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf32 = extern_kernels.convolution(buf31, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf33 = buf32 del buf32 triton_poi_fused_relu_0[grid(262144)](buf33, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf34 = extern_kernels.convolution(buf33, primals_19, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf35 = buf34 del buf34 triton_poi_fused_relu_0[grid(262144)](buf35, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf36 = extern_kernels.convolution(buf35, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf37 = buf36 del buf36 triton_poi_fused_relu_0[grid(262144)](buf37, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf38 = extern_kernels.convolution(buf37, primals_21, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 1, 64, 64), (4096, 4096, 64, 1)) return (buf38, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, buf1, buf3, buf5, buf7, buf9, buf11, buf13, buf15, buf17, buf19, buf21, buf23, buf25, buf27, buf29, buf31, buf33, buf35, buf37) def load_param(model1_path, model2): dict_param1 = torch.load(model1_path) dict_param2 = dict(model2.named_parameters()) for name2 in dict_param2: if name2 in dict_param1: dict_param2[name2].data.copy_(dict_param1[name2].data) model2.load_state_dict(dict_param2) return model2 class SmallVDSR_16xNew(nn.Module): def __init__(self, model=False, fixed=False): super(SmallVDSR_16xNew, self).__init__() self.fixed = fixed self.conv1 = nn.Conv2d(1, 16, 3, 1, 1, bias=False) self.conv2 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv3 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv4 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv5 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv6 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv7 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv8 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv9 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv10 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv11 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv12 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv13 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv14 
= nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv15 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv16 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv17 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv18 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv19 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv20 = nn.Conv2d(16, 1, 3, 1, 1, bias=False) self.relu = nn.ReLU() if model: load_param(model, self) if fixed: for param in self.parameters(): param.requires_grad = False def forward_stem(self, y): y = self.relu(self.conv1(y)) out1 = y y = self.relu(self.conv2(y)) out2 = y y = self.relu(self.conv3(y)) out3 = y y = self.relu(self.conv4(y)) out4 = y y = self.relu(self.conv5(y)) out5 = y y = self.relu(self.conv6(y)) out6 = y y = self.relu(self.conv7(y)) out7 = y y = self.relu(self.conv8(y)) out8 = y y = self.relu(self.conv9(y)) out9 = y y = self.relu(self.conv10(y)) out10 = y y = self.relu(self.conv11(y)) out11 = y y = self.relu(self.conv12(y)) out12 = y y = self.relu(self.conv13(y)) out13 = y y = self.relu(self.conv14(y)) out14 = y y = self.relu(self.conv15(y)) out15 = y y = self.relu(self.conv16(y)) out16 = y y = self.relu(self.conv17(y)) out17 = y y = self.relu(self.conv18(y)) out18 = y y = self.relu(self.conv19(y)) out19 = y y = self.conv20(y) return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10, out11, out12, out13, out14, out15, out16, out17, out18, out19, y) def forward_dense(self, y): y = self.relu(self.conv1(y)) out1 = y y = self.relu(self.conv2(y)) out2 = y y = self.relu(self.conv3(y)) out3 = y y = self.relu(self.conv4(y)) out4 = y y = self.relu(self.conv5(y)) out5 = y y = self.relu(self.conv6(y)) out6 = y y = self.relu(self.conv7(y)) out7 = y y = self.relu(self.conv8(y)) out8 = y y = self.relu(self.conv9(y)) out9 = y y = self.relu(self.conv10(y)) out10 = y y = self.relu(self.conv11(y)) out11 = y y = self.relu(self.conv12(y)) out12 = y y = self.relu(self.conv13(y)) out13 = y y = self.relu(self.conv14(y)) out14 = y y = self.relu(self.conv15(y)) out15 = y y = self.relu(self.conv16(y)) out16 = y y = self.relu(self.conv17(y)) out17 = y y = self.relu(self.conv18(y)) out18 = y y = self.relu(self.conv19(y)) out19 = y y = self.conv20(y) out20 = y return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10, out11, out12, out13, out14, out15, out16, out17, out18, out19, out20) def forward(self, input_0): primals_1 = self.conv1.weight primals_3 = self.conv2.weight primals_4 = self.conv3.weight primals_5 = self.conv4.weight primals_6 = self.conv5.weight primals_7 = self.conv6.weight primals_8 = self.conv7.weight primals_9 = self.conv8.weight primals_10 = self.conv9.weight primals_11 = self.conv10.weight primals_12 = self.conv11.weight primals_13 = self.conv12.weight primals_14 = self.conv13.weight primals_15 = self.conv14.weight primals_16 = self.conv15.weight primals_17 = self.conv16.weight primals_18 = self.conv17.weight primals_19 = self.conv18.weight primals_20 = self.conv19.weight primals_21 = self.conv20.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21]) return output[0]
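The in-place ReLU kernel here is identical to the one in the VDSR_F64B6 listing; only the launch size changes (262144 = 4*16*64*64 elements versus 1048576 = 4*64*64*64). Since xnumel is a multiple of XBLOCK, the bounds mask degenerates into the unused tl.full([XBLOCK], True, tl.int1) expression. A standalone sketch of the same pattern (assumes triton and a CUDA device; the kernel name is illustrative):

import torch
import triton
import triton.language as tl

@triton.jit
def relu_inplace(ptr, XBLOCK: tl.constexpr):
    # Each program rewrites XBLOCK contiguous elements; no mask is needed
    # because the launch below makes numel an exact multiple of XBLOCK.
    offs = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)
    x = tl.load(ptr + offs)
    tl.store(ptr + offs, tl.maximum(x, 0.0))

buf = torch.randn(262144, device='cuda')  # same element count as one SmallVDSR activation
relu_inplace[(buf.numel() // 512,)](buf, XBLOCK=512)
assert bool((buf >= 0).all())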
MingSun-Tse/pytorch-vdsr
SmallVDSR_16x
false
5601
[ "MIT" ]
1
597bacb4ec7385c8cc6cdf91e26e64ef2e6808b7
https://github.com/MingSun-Tse/pytorch-vdsr/tree/597bacb4ec7385c8cc6cdf91e26e64ef2e6808b7
import torch import torch.nn as nn def load_param(model1_path, model2): dict_param1 = torch.load(model1_path) dict_param2 = dict(model2.named_parameters()) for name2 in dict_param2: if name2 in dict_param1: dict_param2[name2].data.copy_(dict_param1[name2].data) model2.load_state_dict(dict_param2) return model2 class Model(nn.Module): def __init__(self, model=False, fixed=False): super().__init__() self.fixed = fixed self.conv1 = nn.Conv2d(1, 16, 3, 1, 1, bias=False) self.conv2 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv3 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv4 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv5 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv6 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv7 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv8 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv9 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv10 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv11 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv12 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv13 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv14 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv15 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv16 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv17 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv18 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv19 = nn.Conv2d(16, 16, 3, 1, 1, bias=False) self.conv20 = nn.Conv2d(16, 1, 3, 1, 1, bias=False) self.relu = nn.ReLU() if model: load_param(model, self) if fixed: for param in self.parameters(): param.requires_grad = False def forward_stem(self, y): y = self.relu(self.conv1(y)) out1 = y y = self.relu(self.conv2(y)) out2 = y y = self.relu(self.conv3(y)) out3 = y y = self.relu(self.conv4(y)) out4 = y y = self.relu(self.conv5(y)) out5 = y y = self.relu(self.conv6(y)) out6 = y y = self.relu(self.conv7(y)) out7 = y y = self.relu(self.conv8(y)) out8 = y y = self.relu(self.conv9(y)) out9 = y y = self.relu(self.conv10(y)) out10 = y y = self.relu(self.conv11(y)) out11 = y y = self.relu(self.conv12(y)) out12 = y y = self.relu(self.conv13(y)) out13 = y y = self.relu(self.conv14(y)) out14 = y y = self.relu(self.conv15(y)) out15 = y y = self.relu(self.conv16(y)) out16 = y y = self.relu(self.conv17(y)) out17 = y y = self.relu(self.conv18(y)) out18 = y y = self.relu(self.conv19(y)) out19 = y y = self.conv20(y) return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10, out11, out12, out13, out14, out15, out16, out17, out18, out19, y) def forward_dense(self, y): y = self.relu(self.conv1(y)) out1 = y y = self.relu(self.conv2(y)) out2 = y y = self.relu(self.conv3(y)) out3 = y y = self.relu(self.conv4(y)) out4 = y y = self.relu(self.conv5(y)) out5 = y y = self.relu(self.conv6(y)) out6 = y y = self.relu(self.conv7(y)) out7 = y y = self.relu(self.conv8(y)) out8 = y y = self.relu(self.conv9(y)) out9 = y y = self.relu(self.conv10(y)) out10 = y y = self.relu(self.conv11(y)) out11 = y y = self.relu(self.conv12(y)) out12 = y y = self.relu(self.conv13(y)) out13 = y y = self.relu(self.conv14(y)) out14 = y y = self.relu(self.conv15(y)) out15 = y # ... truncated (>4000 chars) for memory efficiency
Squash
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/aq/caqnx7bnsdfn2e6tzirgxkuwohtkneweobuzqcu3252knhg7lubg.py # Topologically Sorted Source Nodes: [pow_1, squared_norm, add, scale, mul, sqrt, add_1, truediv_1], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.mul, aten.sqrt] # Source node to ATen node mapping: # add => add # add_1 => add_1 # mul => mul # pow_1 => pow_1 # scale => div # sqrt => sqrt # squared_norm => sum_1 # truediv_1 => div_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_1 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %add), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %arg0_1), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-08), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add_1), kwargs = {}) triton_poi_fused_add_div_mul_pow_sqrt_sum_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_sqrt_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 
'kernel_name': 'triton_poi_fused_add_div_mul_pow_sqrt_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp10 / tmp12 tmp15 = tmp13 * tmp14 tmp16 = libdevice.sqrt(tmp10) tmp17 = 1e-08 tmp18 = tmp16 + tmp17 tmp19 = tmp15 / tmp18 tl.store(out_ptr0 + (x2), tmp19, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, squared_norm, add, scale, mul, sqrt, add_1, truediv_1], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.mul, aten.sqrt] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_pow_sqrt_sum_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class Squash(nn.Module):

    def forward(self, x, dim=-1):
        squared_norm = (x ** 2).sum(dim=dim, keepdim=True)
        scale = squared_norm / (1 + squared_norm)
        return scale * x / (squared_norm.sqrt() + 1e-08)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
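Squash rescales every length-dim vector s by ||s||^2 / (1 + ||s||^2) and divides by ||s|| + 1e-08, so the direction of s is preserved while its norm is squashed below 1. A quick numeric sketch of both properties:

import torch

x = torch.rand(4, 4, 4, 4)
sq = (x ** 2).sum(dim=-1, keepdim=True)
out = sq / (1 + sq) * x / (sq.sqrt() + 1e-08)
assert bool((out.norm(dim=-1) < 1).all())  # norms are squashed below 1
cos = torch.cosine_similarity(out, x, dim=-1)  # direction is unchanged
torch.testing.assert_close(cos, torch.ones_like(cos), rtol=0, atol=1e-05)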
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    # x2 is the flat element index; x1 selects the length-4 vector along the
    # last dimension that this element belongs to.
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + x2, xmask)
    # tmp10 accumulates the squared L2 norm of the 4-vector in registers.
    tmp1 = tmp0 * tmp0
    tmp3 = tmp2 * tmp2
    tmp4 = tmp1 + tmp3
    tmp6 = tmp5 * tmp5
    tmp7 = tmp4 + tmp6
    tmp9 = tmp8 * tmp8
    tmp10 = tmp7 + tmp9
    # Squash: (n^2 / (1 + n^2)) * x / (n + 1e-8), fused into one pass.
    tmp11 = 1.0
    tmp12 = tmp10 + tmp11
    tmp13 = tmp10 / tmp12
    tmp15 = tmp13 * tmp14
    tmp16 = libdevice.sqrt(tmp10)
    tmp17 = 1e-08
    tmp18 = tmp16 + tmp17
    tmp19 = tmp15 / tmp18
    tl.store(out_ptr0 + x2, tmp19, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](arg0_1, buf0,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SquashNew(nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
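The fused kernel hard-codes the last dimension at 4 (four strided loads reduced in registers), so this call() is specialized to (4, 4, 4, 4) float32 inputs. A sketch exercising it against the eager formula (assumes a CUDA device and that call from the listing above is in scope):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
sq = (x ** 2).sum(dim=-1, keepdim=True)
ref = sq / (1 + sq) * x / (sq.sqrt() + 1e-08)
out, = call([x])  # call() empties the argument list, but x stays referenced here
torch.testing.assert_close(out, ref)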
MobtgZhang/MWMLNet
Squash
false
5,602
[ "MIT" ]
1
125bb39935916b6b4be505c51cb6a04eb49b96d0
https://github.com/MobtgZhang/MWMLNet/tree/125bb39935916b6b4be505c51cb6a04eb49b96d0
import torch
import torch.nn as nn


class Model(nn.Module):

    def forward(self, x, dim=-1):
        squared_norm = (x ** 2).sum(dim=dim, keepdim=True)
        scale = squared_norm / (1 + squared_norm)
        return scale * x / (squared_norm.sqrt() + 1e-08)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
ResConnectionLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/df/cdfcie57v6pcdd6oeaz4mvlgksxgyuxzmlv5bklwemyulqhtcxta.py # Topologically Sorted Source Nodes: [mean, std, sub, mul, add, truediv, add_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div] # Source node to ATen node mapping: # add => add # add_1 => add_1 # mean => mean # mul => mul # std => sqrt, var # sub => sub # truediv => div # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [-1]), kwargs = {correction: 1.0, keepdim: True}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sub), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_3), kwargs = {}) triton_poi_fused_add_div_mean_mul_std_sub_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': 
set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp0 * tmp11 tmp13 = tmp2 - tmp10 tmp14 = tmp13 * tmp13 tmp15 = tmp3 - tmp10 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tmp5 - tmp10 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp21 = tmp7 - tmp10 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp24 = 3.0 tmp25 = tmp23 / tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = 1e-06 tmp28 = tmp26 + tmp27 tmp29 = tmp12 / tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + (x2), tmp31, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ej/cejqrrx6xg2ykx3zpkubs7k47thei4h2ldnfxjlnconk37jvowpc.py # Topologically Sorted Source Nodes: [mul_1, pow_1, mul_2, add_2, mul_3, tanh, add_3, hid], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh] # Source node to ATen node mapping: # add_2 => add_2 # add_3 => add_3 # hid => mul_4 # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # pow_1 => pow_1 # tanh => tanh # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 3), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %mul_2), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.7978845608028654), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_3,), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %add_3), kwargs = {}) triton_poi_fused_add_mul_pow_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_pow_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import 
triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_pow_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp0 * tmp0 tmp4 = tmp3 * tmp0 tmp5 = 0.044715 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp8 = 0.7978845608028654 tmp9 = tmp7 * tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp2 * tmp12 tl.store(out_ptr0 + (x0), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/b3/cb3g6fwupaz5a5j23ckgaqji56bsmt4ixc37lwt344u76m75fqhf.py # Topologically Sorted Source Nodes: [add_4], Original ATen: [aten.add] # Source node to ATen node mapping: # add_4 => add_4 # Graph fragment: # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_3), kwargs = {}) triton_poi_fused_add_2 = async_compile.triton('triton_poi_fused_add_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, std, sub, mul, add, truediv, add_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mean_mul_std_sub_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0) del primals_2 del primals_3 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1, pow_1, mul_2, add_2, mul_3, tanh, add_3, hid], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh] triton_poi_fused_add_mul_pow_tanh_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [add_4], Original ATen: [aten.add] triton_poi_fused_add_2.run(buf4, primals_1, primals_7, 256, grid=grid(256), stream=stream0) del primals_7 return (buf4, primals_1, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', 
dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.nn as nn


class LayerNorm(nn.Module):
    """Construct a layernorm module (See citation for details)."""

    def __init__(self, features, eps=1e-06):
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        std = x.std(-1, keepdim=True)
        return self.a_2 * (x - mean) / (std + self.eps) + self.b_2


class GELU(nn.Module):

    def forward(self, x):
        return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x +
            0.044715 * torch.pow(x, 3))))


class FeedForwardNetwork(nn.Module):

    def __init__(self, in_dim, hid_dim) -> None:
        super().__init__()
        self.lin1 = nn.Linear(in_dim, hid_dim)
        self.lin2 = nn.Linear(hid_dim, in_dim)
        self.gleu = GELU()
        self.dropout = nn.Dropout()

    def forward(self, inputs):
        hid = self.gleu(self.lin1(inputs))
        return self.lin2(self.dropout(hid))


class ResConnectionLayer(nn.Module):

    def __init__(self, in_dim, dropout):
        super(ResConnectionLayer, self).__init__()
        self.norm = LayerNorm(in_dim)
        self.dropout = nn.Dropout(dropout)
        self.ffn = FeedForwardNetwork(in_dim, in_dim)

    def forward(self, x):
        """Apply residual connection to any sublayer with the same size."""
        return x + self.dropout(self.ffn(self.norm(x)))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_dim': 4, 'dropout': 0.5}]
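One detail of this entry deserves a note: the hand-rolled `GELU` (held on the misspelled attribute `self.gleu` in the upstream source) is the standard tanh approximation of GELU, and in PyTorch 1.12 and later it should coincide with `F.gelu(x, approximate='tanh')`. A quick hedged sketch of that claim:

import math
import torch
import torch.nn.functional as F

# math.sqrt(2 / math.pi) ~= 0.7978845608028654, the same literal that
# appears in the fused Triton kernel for this entry.
x = torch.randn(1000)
manual = 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x +
    0.044715 * torch.pow(x, 3))))
# Assumes a PyTorch build where F.gelu accepts the `approximate` argument.
assert torch.allclose(manual, F.gelu(x, approximate='tanh'), atol=1e-6)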
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp8 = tmp6 + tmp7
    tmp9 = 4.0
    tmp10 = tmp8 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp0 * tmp11
    tmp13 = tmp2 - tmp10
    tmp14 = tmp13 * tmp13
    tmp15 = tmp3 - tmp10
    tmp16 = tmp15 * tmp15
    tmp17 = tmp14 + tmp16
    tmp18 = tmp5 - tmp10
    tmp19 = tmp18 * tmp18
    tmp20 = tmp17 + tmp19
    tmp21 = tmp7 - tmp10
    tmp22 = tmp21 * tmp21
    tmp23 = tmp20 + tmp22
    tmp24 = 3.0
    tmp25 = tmp23 / tmp24
    tmp26 = libdevice.sqrt(tmp25)
    tmp27 = 1e-06
    tmp28 = tmp26 + tmp27
    tmp29 = tmp12 / tmp28
    tmp31 = tmp29 + tmp30
    tl.store(out_ptr0 + x2, tmp31, xmask)


@triton.jit
def triton_poi_fused_add_mul_pow_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = tmp0 * tmp0
    tmp4 = tmp3 * tmp0
    tmp5 = 0.044715
    tmp6 = tmp4 * tmp5
    tmp7 = tmp0 + tmp6
    tmp8 = 0.7978845608028654
    tmp9 = tmp7 * tmp8
    tmp10 = libdevice.tanh(tmp9)
    tmp11 = 1.0
    tmp12 = tmp10 + tmp11
    tmp13 = tmp2 * tmp12
    tl.store(out_ptr0 + x0, tmp13, xmask)


@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_out_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_2,
            primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf0, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf1)
        del primals_5
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_mul_pow_tanh_1[grid(256)](buf1, buf2, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3)
        buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf3
        triton_poi_fused_add_2[grid(256)](buf4, primals_1, primals_7, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_7
    return (buf4, primals_1, reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
        buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_6,
        primals_4)


class LayerNorm(nn.Module):
    """Construct a layernorm module (See citation for details)."""

    def __init__(self, features, eps=1e-06):
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        std = x.std(-1, keepdim=True)
        return self.a_2 * (x - mean) / (std + self.eps) + self.b_2


class GELU(nn.Module):

    def forward(self, x):
        return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x +
            0.044715 * torch.pow(x, 3))))


class FeedForwardNetwork(nn.Module):

    def __init__(self, in_dim, hid_dim) -> None:
        super().__init__()
        self.lin1 = nn.Linear(in_dim, hid_dim)
        self.lin2 = nn.Linear(hid_dim, in_dim)
        self.gleu = GELU()
        self.dropout = nn.Dropout()

    def forward(self, inputs):
        hid = self.gleu(self.lin1(inputs))
        return self.lin2(self.dropout(hid))


class ResConnectionLayerNew(nn.Module):

    def __init__(self, in_dim, dropout):
        super(ResConnectionLayerNew, self).__init__()
        self.norm = LayerNorm(in_dim)
        self.dropout = nn.Dropout(dropout)
        self.ffn = FeedForwardNetwork(in_dim, in_dim)

    def forward(self, input_0):
        primals_2 = self.norm.a_2
        primals_3 = self.norm.b_2
        primals_4 = self.ffn.lin1.weight
        primals_5 = self.ffn.lin1.bias
        primals_6 = self.ffn.lin2.weight
        primals_7 = self.ffn.lin2.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
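Note that the generated graph above contains no dropout nodes, so it reproduces the module with both `nn.Dropout` layers inactive, i.e. eval-mode behavior. A hedged equivalence sketch, assuming a CUDA device and that `ResConnectionLayer` and `ResConnectionLayerNew` from the cells above are in scope:

import torch

eager = ResConnectionLayer(in_dim=4, dropout=0.5).cuda().eval()
compiled = ResConnectionLayerNew(in_dim=4, dropout=0.5).cuda().eval()
compiled.load_state_dict(eager.state_dict())  # identical parameter names
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    # eval() matters: in train mode the eager module applies dropout,
    # which the compiled graph does not model.
    assert torch.allclose(eager(x), compiled(x), atol=1e-5)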
MobtgZhang/MWMLNet
ResConnectionLayer
false
5,603
[ "MIT" ]
1
125bb39935916b6b4be505c51cb6a04eb49b96d0
https://github.com/MobtgZhang/MWMLNet/tree/125bb39935916b6b4be505c51cb6a04eb49b96d0
import math
import torch
import torch.nn as nn


class LayerNorm(nn.Module):
    """Construct a layernorm module (See citation for details)."""

    def __init__(self, features, eps=1e-06):
        super().__init__()
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        std = x.std(-1, keepdim=True)
        return self.a_2 * (x - mean) / (std + self.eps) + self.b_2


class GELU(nn.Module):

    def forward(self, x):
        return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x +
            0.044715 * torch.pow(x, 3))))


class FeedForwardNetwork(nn.Module):

    def __init__(self, in_dim, hid_dim) -> None:
        super().__init__()
        self.lin1 = nn.Linear(in_dim, hid_dim)
        self.lin2 = nn.Linear(hid_dim, in_dim)
        self.gleu = GELU()
        self.dropout = nn.Dropout()

    def forward(self, inputs):
        hid = self.gleu(self.lin1(inputs))
        return self.lin2(self.dropout(hid))


class Model(nn.Module):

    def __init__(self, in_dim, dropout):
        super().__init__()
        self.norm = LayerNorm(in_dim)
        self.dropout = nn.Dropout(dropout)
        self.ffn = FeedForwardNetwork(in_dim, in_dim)

    def forward(self, x):
        """Apply residual connection to any sublayer with the same size."""
        return x + self.dropout(self.ffn(self.norm(x)))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 0.5]
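A second note on this entry: the hand-written `LayerNorm` divides by `x.std(-1)`, which applies Bessel's correction (sum of squared deviations over N - 1; hence the literal `3.0` in the first Triton kernel for a last dimension of size 4), whereas `nn.LayerNorm` uses the biased variance. The two are therefore not drop-in replacements; a small sketch of the discrepancy:

import torch

x = torch.randn(4, 4)
unbiased = x.std(-1, keepdim=True)  # what this LayerNorm divides by
biased = x.var(-1, keepdim=True, unbiased=False).sqrt()  # nn.LayerNorm-style
# The ratio is the constant sqrt(N / (N - 1)) = sqrt(4 / 3) ~= 1.1547.
assert torch.allclose(unbiased / biased, torch.full_like(unbiased,
    (4 / 3) ** 0.5))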
AE
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/lp/clp5td7lbqtje3pt7v6xbcp766swgazqemomz2nzsxtdtmjesxht.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [9, 9], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/fx/cfxuycpiorfjquslszyvaiul2h6o52zfes7nl5luwduiyxejuafg.py # Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # max_pool2d => _low_memory_max_pool2d_offsets_to_indices, _low_memory_max_pool2d_with_offsets, getitem # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %_low_memory_max_pool2d_offsets_to_indices : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_offsets_to_indices.default](args = (%getitem_1, 2, 64, [2, 2], [0, 0]), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = (xindex // 32) x4 = xindex x2 = (xindex // 32) % 32 tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, 
tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tmp17 = tl.full([1], 2, tl.int32) tmp18 = tl.where((tmp16 < 0) != (tmp17 < 0), tl.where(tmp16 % tmp17 != 0, tmp16 // tmp17 - 1, tmp16 // tmp17), tmp16 // tmp17) tmp19 = tmp18 * tmp17 tmp20 = tmp16 - tmp19 tmp21 = 2*x2 tmp22 = tmp21 + tmp18 tmp23 = 2*x0 tmp24 = tmp23 + tmp20 tmp25 = tl.full([1], 64, tl.int64) tmp26 = tmp22 * tmp25 tmp27 = tmp26 + tmp24 tl.store(out_ptr0 + (x4), tmp6, None) tl.store(out_ptr1 + (x4), tmp27, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/n2/cn23y7jhz2xygynfpumprse4torkd7nlohv4kacxis56jwb54k5e.py # Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_2 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [7, 7], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: 
runs/run_shard_4/inductor_cache/b5/cb53yctlr3gz2rfsuo4fslnritazzf5vgsiae5sejhqagushb4hc.py # Topologically Sorted Source Nodes: [max_pool2d_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # max_pool2d_1 => _low_memory_max_pool2d_offsets_to_indices_1, _low_memory_max_pool2d_with_offsets_1 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %_low_memory_max_pool2d_offsets_to_indices_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_offsets_to_indices.default](args = (%getitem_3, 2, 32, [2, 2], [0, 0]), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x3 = (xindex // 16) x1 = (xindex // 16) % 16 x4 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x3)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x3)), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x3)), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x3)), None, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 2, tl.int32) tmp18 = tl.where((tmp15 < 0) != (tmp17 < 0), tl.where(tmp15 % tmp17 != 0, tmp15 // tmp17 - 1, tmp15 // tmp17), tmp15 // tmp17) tmp19 = 
tmp18 * tmp17 tmp20 = tmp15 - tmp19 tmp21 = 2*x1 tmp22 = tmp21 + tmp18 tmp23 = 2*x0 tmp24 = tmp23 + tmp20 tmp25 = tl.full([1], 32, tl.int64) tmp26 = tmp22 * tmp25 tmp27 = tmp26 + tmp24 tl.store(out_ptr0 + (x4), tmp27, None) tl.store(out_ptr1 + (x4), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/rb/crbjzrv5jiob43hnwmv5pasmjf4rrkawp7xunizdmnue24skyybh.py # Topologically Sorted Source Nodes: [conv_transpose2d, x_5], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv_transpose2d => convolution_2 # x_5 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%max_unpool2d, %primals_6, %primals_7, [1, 1], [7, 7], [1, 1], True, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_4(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x3), tmp4, None) tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/pg/cpgg4oz3hvqpwyi5s6ig3dfrsdbicyjavvtp2wvyfnoivug2bvhp.py # Topologically Sorted Source Nodes: [conv_transpose2d_1, x_7], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # 
conv_transpose2d_1 => convolution_3 # x_7 => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%max_unpool2d_1, %primals_8, %primals_9, [1, 1], [9, 9], [1, 1], True, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_5 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = 0.0 tmp7 = tmp5 <= tmp6 tl.store(in_out_ptr0 + (x0), tmp5, None) tl.store(out_ptr0 + (x0), tmp7, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (16, 1, 19, 19), (361, 361, 19, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (4, 16, 15, 15), (3600, 225, 15, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 16, 15, 15), (3600, 225, 15, 1)) assert_size_stride(primals_7, (16, ), (1, )) assert_size_stride(primals_8, (16, 1, 19, 19), (361, 361, 19, 1)) assert_size_stride(primals_9, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = 
extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(9, 9), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 262144, grid=grid(262144), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.float32) buf3 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.int64) # Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 65536, grid=grid(65536), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(7, 7), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 32, 32), (4096, 1024, 32, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 16384, grid=grid(16384), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.int64) buf7 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [max_pool2d_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 4096, grid=grid(4096), stream=stream0) # Topologically Sorted Source Nodes: [max_pool2d_1, x_4], Original ATen: [aten.max_pool2d_with_indices, aten.max_unpool2d] buf8 = torch.ops.aten.max_unpool2d.default(buf7, buf6, [32, 32]) del buf7 buf9 = buf8 del buf8 # Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1), padding=(7, 7), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 16, 32, 32), (16384, 1024, 32, 1)) buf11 = buf10; del buf10 # reuse buf17 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1), torch.bool) # Topologically Sorted Source Nodes: [conv_transpose2d, x_5], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_4.run(buf11, primals_7, buf17, 65536, grid=grid(65536), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.max_unpool2d] buf12 = torch.ops.aten.max_unpool2d.default(buf11, buf3, [64, 64]) del buf11 buf13 = buf12 del buf12 # Topologically Sorted Source Nodes: [conv_transpose2d_1], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf13, primals_8, stride=(1, 1), padding=(9, 9), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf15 = buf14; del buf14 # reuse buf16 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [conv_transpose2d_1, x_7], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_5.run(buf15, primals_9, buf16, 16384, grid=grid(16384), stream=stream0) del primals_9 
return (buf15, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf2, buf3, buf5, buf6, buf9, buf13, buf16, buf17, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 1, 19, 19), (361, 361, 19, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 16, 15, 15), (3600, 225, 15, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 16, 15, 15), (3600, 225, 15, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((16, 1, 19, 19), (361, 361, 19, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
import torch.utils.data
import torch.utils.data.distributed


class AE(nn.Module):

    def __init__(self):
        super(AE, self).__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=19, padding=9)
        self.conv2 = nn.Conv2d(16, 4, kernel_size=15, padding=7)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
        self.t_conv1 = nn.ConvTranspose2d(4, 16, kernel_size=15, padding=7)
        self.t_conv2 = nn.ConvTranspose2d(16, 1, kernel_size=19, padding=9)
        self.unpool = nn.MaxUnpool2d(kernel_size=2, stride=2)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.relu(self.conv1(x))
        x, pool1 = self.pool(x)
        x = self.relu(self.conv2(x))
        x, pool2 = self.pool(x)
        x = self.unpool(x, pool2)
        x = self.relu(self.t_conv1(x))
        x = self.unpool(x, pool1)
        x = self.relu(self.t_conv2(x))
        return x


def get_inputs():
    return [torch.rand([4, 1, 64, 64])]


def get_init_inputs():
    return [[], {}]
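A small shape check for the autoencoder, assuming `AE` from the cell above is in scope: the 'same' paddings (kernel 19 with padding 9, kernel 15 with padding 7) preserve spatial size, and the two pool/unpool pairs are symmetric (64 -> 32 -> 16 -> 32 -> 64), so the output shape must equal the input shape:

import torch

model = AE().eval()
x = torch.rand(4, 1, 64, 64)
with torch.no_grad():
    y = model(x)
# The encoder halves the spatial size twice; the decoder restores it twice.
assert y.shape == x.shape == (4, 1, 64, 64)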
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 16
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 32
    x1 = xindex // 32
    x4 = xindex
    x2 = xindex // 32 % 32
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
        ='evict_last')
    tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
        eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tmp17 = tl.full([1], 2, tl.int32)
    tmp18 = tl.where((tmp16 < 0) != (tmp17 < 0), tl.where(tmp16 % tmp17 !=
        0, tmp16 // tmp17 - 1, tmp16 // tmp17), tmp16 // tmp17)
    tmp19 = tmp18 * tmp17
    tmp20 = tmp16 - tmp19
    tmp21 = 2 * x2
    tmp22 = tmp21 + tmp18
    tmp23 = 2 * x0
    tmp24 = tmp23 + tmp20
    tmp25 = tl.full([1], 64, tl.int64)
    tmp26 = tmp22 * tmp25
    tmp27 = tmp26 + tmp24
    tl.store(out_ptr0 + x4, tmp6, None)
    tl.store(out_ptr1 + x4, tmp27, None)


@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 1024 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 16
    x3 = xindex // 16
    x1 = xindex // 16 % 16
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x3), None, eviction_policy=
        'evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x3), None, eviction_policy=
        'evict_last')
    tmp7 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x3), None,
        eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x3), None,
        eviction_policy='evict_last')
    tmp2 = tmp1 > tmp0
    tmp3 = tl.full([1], 1, tl.int8)
    tmp4 = tl.full([1], 0, tl.int8)
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = triton_helpers.maximum(tmp1, tmp0)
    tmp8 = tmp7 > tmp6
    tmp9 = tl.full([1], 2, tl.int8)
    tmp10 = tl.where(tmp8, tmp9, tmp5)
    tmp11 = triton_helpers.maximum(tmp7, tmp6)
    tmp13 = tmp12 > tmp11
    tmp14 = tl.full([1], 3, tl.int8)
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = triton_helpers.maximum(tmp12, tmp11)
    tmp17 = tl.full([1], 2, tl.int32)
    tmp18 = tl.where((tmp15 < 0) != (tmp17 < 0), tl.where(tmp15 % tmp17 !=
        0, tmp15 // tmp17 - 1, tmp15 // tmp17), tmp15 // tmp17)
    tmp19 = tmp18 * tmp17
    tmp20 = tmp15 - tmp19
    tmp21 = 2 * x1
    tmp22 = tmp21 + tmp18
    tmp23 = 2 * x0
    tmp24 = tmp23 + tmp20
    tmp25 = tl.full([1], 32, tl.int64)
    tmp26 = tmp22 * tmp25
    tmp27 = tmp26 + tmp24
    tl.store(out_ptr0 + x4, tmp27, None)
    tl.store(out_ptr1 + x4, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_4(in_out_ptr0,
    in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 1024 % 16
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x3, tmp4, None)
    tl.store(out_ptr0 + x3, tmp6, None)


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_5(in_out_ptr0,
    in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)
    tmp6 = 0.0
    tmp7 = tmp5 <= tmp6
    tl.store(in_out_ptr0 + x0, tmp5, None)
    tl.store(out_ptr0 + x0, tmp7, None)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (16, 1, 19, 19), (361, 361, 19, 1))
    assert_size_stride(primals_2, (16,), (1,))
    assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
    assert_size_stride(primals_4, (4, 16, 15, 15), (3600, 225, 15, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 16, 15, 15), (3600, 225, 15, 1))
    assert_size_stride(primals_7, (16,), (1,))
    assert_size_stride(primals_8, (16, 1, 19, 19), (361, 361, 19, 1))
    assert_size_stride(primals_9, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(9, 9), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(262144)](buf1, primals_2,
            262144, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1),
            torch.float32)
        buf3 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1),
            torch.int64)
        triton_poi_fused_max_pool2d_with_indices_1[grid(65536)](buf1, buf2,
            buf3, 65536, XBLOCK=256, num_warps=4, num_stages=1)
        buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
            padding=(7, 7), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 4, 32, 32), (4096, 1024, 32, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_relu_2[grid(16384)](buf5, primals_5,
            16384, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf6 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1),
            torch.int64)
        buf7 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1),
            torch.float32)
        triton_poi_fused_max_pool2d_with_indices_3[grid(4096)](buf5, buf6,
            buf7, 4096, XBLOCK=128, num_warps=4, num_stages=1)
        buf8 = torch.ops.aten.max_unpool2d.default(buf7, buf6, [32, 32])
        del buf7
        buf9 = buf8
        del buf8
        buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1),
            padding=(7, 7), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf10, (4, 16, 32, 32), (16384, 1024, 32, 1))
        buf11 = buf10
        del buf10
        buf17 = empty_strided_cuda((4, 16, 32, 32), (16384, 1024, 32, 1),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_4[grid(65536)](
            buf11, primals_7, buf17, 65536, XBLOCK=512, num_warps=4,
            num_stages=1)
        del primals_7
        buf12 = torch.ops.aten.max_unpool2d.default(buf11, buf3, [64, 64])
        del buf11
        buf13 = buf12
        del buf12
        buf14 = extern_kernels.convolution(buf13, primals_8, stride=(1, 1),
            padding=(9, 9), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf14, (4, 1, 64, 64), (4096, 4096, 64, 1))
        buf15 = buf14
        del buf14
        buf16 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_5[grid(16384)](
            buf15, primals_9, buf16, 16384, XBLOCK=256, num_warps=4,
            num_stages=1)
        del primals_9
    return (buf15, primals_1, primals_3, primals_4, primals_6, primals_8,
        buf1, buf2, buf3, buf5, buf6, buf9, buf13, buf16, buf17)


class AENew(nn.Module):

    def __init__(self):
        super(AENew, self).__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=19, padding=9)
        self.conv2 = nn.Conv2d(16, 4, kernel_size=15, padding=7)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
        self.t_conv1 = nn.ConvTranspose2d(4, 16, kernel_size=15, padding=7)
        self.t_conv2 = nn.ConvTranspose2d(16, 1, kernel_size=19, padding=9)
        self.unpool = nn.MaxUnpool2d(kernel_size=2, stride=2)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.t_conv1.weight
        primals_7 = self.t_conv1.bias
        primals_8 = self.t_conv2.weight
        primals_9 = self.t_conv2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
Minauras/deepdefresneling
AE
false
5604
[ "BSD-2-Clause" ]
1
e17168e9a8d322201998c73da54efbd334b0ffb9
https://github.com/Minauras/deepdefresneling/tree/e17168e9a8d322201998c73da54efbd334b0ffb9
import torch from torch import nn import torch.utils.data import torch.utils.data.distributed class Model(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 16, kernel_size=19, padding=9) self.conv2 = nn.Conv2d(16, 4, kernel_size=15, padding=7) self.pool = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True) self.t_conv1 = nn.ConvTranspose2d(4, 16, kernel_size=15, padding=7) self.t_conv2 = nn.ConvTranspose2d(16, 1, kernel_size=19, padding=9) self.unpool = nn.MaxUnpool2d(kernel_size=2, stride=2) self.relu = nn.ReLU(inplace=True) def forward(self, x): x = self.relu(self.conv1(x)) x, pool1 = self.pool(x) x = self.relu(self.conv2(x)) x, pool2 = self.pool(x) x = self.unpool(x, pool2) x = self.relu(self.t_conv1(x)) x = self.unpool(x, pool1) x = self.relu(self.t_conv2(x)) return x def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return []
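A quick way to sanity-check a row like this is to run the eager module (`Model` from pytorch_code) and the Inductor version (`AENew` from triton_code) side by side. The sketch below is a minimal smoke test, not part of the dataset row: it assumes both classes are importable in one session and that a CUDA device is available.

import torch

eager = Model().cuda().eval()
compiled = AENew().cuda().eval()
compiled.load_state_dict(eager.state_dict())  # submodule names match, so weights transfer 1:1

x = torch.rand([4, 1, 64, 64], device='cuda')  # same shape as get_inputs()
with torch.no_grad():
    ref = eager(x)     # conv/pool encoder, unpool/transposed-conv decoder
    out = compiled(x)  # dispatches to the generated call([...]) above
print(torch.allclose(ref, out, atol=1e-5))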
Lookahead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/xf/cxfjorpj7nzakemkplfiue4lxn3wssubujqanweih6rojbuoolb7.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # x_1 => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%permute_1, [0, 3], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 8], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 7 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = x1 tmp1 = tl.full([1, 1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.load(in_ptr0 + (y0 + (16*x1)), tmp2 & xmask & ymask, eviction_policy='evict_last', other=0.0) tl.store(out_ptr0 + (x1 + (7*y0)), tmp3, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/r7/cr7evfztwmdzfk7dfomwxcc4g457gqiibpdthzqr2y7vyuw4ptpg.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_3 => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7), (28, 7, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 16, 7, grid=grid(16, 7), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=4, bias=None) 
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf1, buf2, 4, 16, grid=grid(4, 16), stream=stream0) del buf1 return (buf2, primals_2, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data.distributed import torch.nn as nn import torch.nn.functional as F class Lookahead(nn.Module): def __init__(self, n_features, context): super(Lookahead, self).__init__() assert context > 0 self.context = context self.n_features = n_features self.pad = 0, self.context - 1 self.conv = nn.Conv1d(self.n_features, self.n_features, kernel_size =self.context, stride=1, groups=self.n_features, padding=0, bias=None) def forward(self, x): x = x.transpose(0, 1).transpose(1, 2) x = F.pad(x, pad=self.pad, value=0) x = self.conv(x) x = x.transpose(1, 2).transpose(0, 1).contiguous() return x def __repr__(self): return self.__class__.__name__ + '(' + 'n_features=' + str(self. n_features) + ', context=' + str(self.context) + ')' def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'n_features': 4, 'context': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data.distributed import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 7 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = x1 tmp1 = tl.full([1, 1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.load(in_ptr0 + (y0 + 16 * x1), tmp2 & xmask & ymask, eviction_policy='evict_last', other=0.0) tl.store(out_ptr0 + (x1 + 7 * y0), tmp3, xmask & ymask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7), (28, 7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(16, 7)](primals_1, buf0, 16, 7, XBLOCK=8, YBLOCK=16, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=4, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(4, 16)](buf1, buf2, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) del buf1 return buf2, primals_2, buf0 class LookaheadNew(nn.Module): def __init__(self, n_features, context): super(LookaheadNew, self).__init__() assert context > 0 self.context = context self.n_features = n_features self.pad = 0, self.context - 1 self.conv = nn.Conv1d(self.n_features, self.n_features, kernel_size =self.context, stride=1, groups=self.n_features, padding=0, bias=None) def __repr__(self): return self.__class__.__name__ + '(' + 'n_features=' + str(self. n_features) + ', context=' + str(self.context) + ')' def forward(self, input_0): primals_2 = self.conv.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
MrXJC/deepspeech.pytorch
Lookahead
false
5605
[ "MIT" ]
1
6379c18d3f56cad8896a51d45166ea979423e0bf
https://github.com/MrXJC/deepspeech.pytorch/tree/6379c18d3f56cad8896a51d45166ea979423e0bf
import torch import torch.utils.data.distributed import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, n_features, context): super().__init__() assert context > 0 self.context = context self.n_features = n_features self.pad = 0, self.context - 1 self.conv = nn.Conv1d(self.n_features, self.n_features, kernel_size =self.context, stride=1, groups=self.n_features, padding=0, bias=None) def forward(self, x): x = x.transpose(0, 1).transpose(1, 2) x = F.pad(x, pad=self.pad, value=0) x = self.conv(x) x = x.transpose(1, 2).transpose(0, 1).contiguous() return x def __repr__(self): return self.__class__.__name__ + '(' + 'n_features=' + str(self. n_features) + ', context=' + str(self.context) + ')' def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [4, 4]
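The Lookahead layer in this record is shape-preserving by construction: it right-pads the time axis with context - 1 zeros and then applies a depthwise (groups=n_features) convolution of width context, so exactly T frames come out for T frames in. A minimal shape check, using the same toy sizes as get_inputs() and the Lookahead class from python_code above:

import torch

layer = Lookahead(n_features=4, context=4)
x = torch.rand(4, 4, 4)       # (time, batch, features)
y = layer(x)
assert y.shape == x.shape     # T + (context - 1) - context + 1 == T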
VDSR
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/jj/cjjr5irxfotmh4xreee5lxonmkib4qscmbuzwp2xvaafsncwvu3q.py # Topologically Sorted Source Nodes: [y], Original ATen: [aten.relu] # Source node to ATen node mapping: # y => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + (x0), tmp2, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def 
call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21 = args args.clear() assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_3, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_10, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_12, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_13, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_14, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_15, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_17, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_18, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_19, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_20, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_21, (1, 64, 3, 3), (576, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [y], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf3, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf5, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [y_3], Original ATen: [aten.relu] 
triton_poi_fused_relu_0.run(buf7, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [y_4], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf9, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [y_5], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf11, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [y_6], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf13, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf13, primals_9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf15 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [y_7], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf15, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution] buf16 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf17 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [y_8], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf17, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution] buf18 = extern_kernels.convolution(buf17, primals_11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf19 = buf18; del buf18 # reuse # Topologically Sorted Source Nodes: [y_9], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf19, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution] buf20 = extern_kernels.convolution(buf19, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf21 = buf20; del buf20 # reuse # Topologically Sorted Source Nodes: [y_10], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf21, 1048576, grid=grid(1048576), stream=stream0) # 
Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution] buf22 = extern_kernels.convolution(buf21, primals_13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf23 = buf22; del buf22 # reuse # Topologically Sorted Source Nodes: [y_11], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf23, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution] buf24 = extern_kernels.convolution(buf23, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf25 = buf24; del buf24 # reuse # Topologically Sorted Source Nodes: [y_12], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf25, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution] buf26 = extern_kernels.convolution(buf25, primals_15, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf27 = buf26; del buf26 # reuse # Topologically Sorted Source Nodes: [y_13], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf27, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution] buf28 = extern_kernels.convolution(buf27, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf29 = buf28; del buf28 # reuse # Topologically Sorted Source Nodes: [y_14], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf29, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_15], Original ATen: [aten.convolution] buf30 = extern_kernels.convolution(buf29, primals_17, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf31 = buf30; del buf30 # reuse # Topologically Sorted Source Nodes: [y_15], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf31, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_16], Original ATen: [aten.convolution] buf32 = extern_kernels.convolution(buf31, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf33 = buf32; del buf32 # reuse # Topologically Sorted Source Nodes: [y_16], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf33, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_17], Original ATen: [aten.convolution] buf34 = extern_kernels.convolution(buf33, primals_19, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf35 = buf34; del buf34 # reuse # Topologically Sorted Source Nodes: [y_17], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf35, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_18], Original 
ATen: [aten.convolution] buf36 = extern_kernels.convolution(buf35, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf37 = buf36; del buf36 # reuse # Topologically Sorted Source Nodes: [y_18], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf37, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [y_19], Original ATen: [aten.convolution] buf38 = extern_kernels.convolution(buf37, primals_21, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 1, 64, 64), (4096, 4096, 64, 1)) return (buf38, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, buf1, buf3, buf5, buf7, buf9, buf11, buf13, buf15, buf17, buf19, buf21, buf23, buf25, buf27, buf29, buf31, buf33, buf35, buf37, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((1, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, 
primals_21]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn def load_param(model1_path, model2): dict_param1 = torch.load(model1_path) dict_param2 = dict(model2.named_parameters()) for name2 in dict_param2: if name2 in dict_param1: dict_param2[name2].data.copy_(dict_param1[name2].data) model2.load_state_dict(dict_param2) return model2 class VDSR(nn.Module): def __init__(self, model=False, fixed=False): super(VDSR, self).__init__() self.fixed = fixed self.conv1 = nn.Conv2d(1, 64, 3, 1, 1, bias=False) self.conv2 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv3 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv4 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv5 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv6 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv7 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv8 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv9 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv10 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv11 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv12 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv13 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv14 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv15 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv16 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv17 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv18 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv19 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv20 = nn.Conv2d(64, 1, 3, 1, 1, bias=False) self.relu = nn.ReLU(inplace=True) if model: load_param(model, self) if fixed: for param in self.parameters(): param.requires_grad = False def forward(self, y): y = self.relu(self.conv1(y)) y = self.relu(self.conv2(y)) y = self.relu(self.conv3(y)) y = self.relu(self.conv4(y)) y = self.relu(self.conv5(y)) y = self.relu(self.conv6(y)) y = self.relu(self.conv7(y)) y = self.relu(self.conv8(y)) y = self.relu(self.conv9(y)) y = self.relu(self.conv10(y)) y = self.relu(self.conv11(y)) y = self.relu(self.conv12(y)) y = self.relu(self.conv13(y)) y = self.relu(self.conv14(y)) y = self.relu(self.conv15(y)) y = self.relu(self.conv16(y)) y = self.relu(self.conv17(y)) y = self.relu(self.conv18(y)) y = self.relu(self.conv19(y)) y = self.conv20(y) return y def forward_stem(self, y): y = self.relu(self.conv1(y)) out1 = y y = self.relu(self.conv2(y)) out2 = y y = self.relu(self.conv3(y)) out3 = y y = self.relu(self.conv4(y)) out4 = y y = self.relu(self.conv5(y)) out5 = y y = self.relu(self.conv6(y)) out6 = y y = self.relu(self.conv7(y)) out7 = y y = self.relu(self.conv8(y)) out8 = y y = self.relu(self.conv9(y)) out9 = y y = self.relu(self.conv10(y)) out10 = y y = self.relu(self.conv11(y)) out11 = y y = self.relu(self.conv12(y)) out12 = y y = self.relu(self.conv13(y)) out13 = y y = self.relu(self.conv14(y)) out14 = y y = self.relu(self.conv15(y)) out15 = y y = self.relu(self.conv16(y)) out16 = y y = self.relu(self.conv17(y)) out17 = y y = self.relu(self.conv18(y)) out18 = y y = self.relu(self.conv19(y)) out19 = y y = self.conv20(y) return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10, out11, out12, out13, out14, out15, out16, out17, out18, out19, y) def forward_dense(self, y): y = self.relu(self.conv1(y)) out1 = y y = self.relu(self.conv2(y)) out2 = y y = self.relu(self.conv3(y)) out3 = y y = self.relu(self.conv4(y)) out4 = y y = self.relu(self.conv5(y)) out5 = y y = self.relu(self.conv6(y)) out6 = y y = self.relu(self.conv7(y)) out7 = y y = self.relu(self.conv8(y)) out8 = y y = self.relu(self.conv9(y)) out9 = y y = self.relu(self.conv10(y)) 
out10 = y y = self.relu(self.conv11(y)) out11 = y y = self.relu(self.conv12(y)) out12 = y y = self.relu(self.conv13(y)) out13 = y y = self.relu(self.conv14(y)) out14 = y y = self.relu(self.conv15(y)) out15 = y y = self.relu(self.conv16(y)) out16 = y y = self.relu(self.conv17(y)) out17 = y y = self.relu(self.conv18(y)) out18 = y y = self.relu(self.conv19(y)) out19 = y y = self.conv20(y) out20 = y return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10, out11, out12, out13, out14, out15, out16, out17, out18, out19, out20) def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21) = args args.clear() assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_3, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_10, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_12, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_13, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_14, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_15, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_17, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_18, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_19, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_20, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_21, (1, 64, 3, 3), (576, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(1048576)](buf1, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_relu_0[grid(1048576)](buf3, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf5 = buf4 del buf4 triton_poi_fused_relu_0[grid(1048576)](buf5, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf7 = buf6 del buf6 triton_poi_fused_relu_0[grid(1048576)](buf7, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf9 = buf8 del buf8 triton_poi_fused_relu_0[grid(1048576)](buf9, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf10 = extern_kernels.convolution(buf9, primals_7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf11 = buf10 del buf10 triton_poi_fused_relu_0[grid(1048576)](buf11, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf13 = buf12 del buf12 triton_poi_fused_relu_0[grid(1048576)](buf13, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf14 = extern_kernels.convolution(buf13, primals_9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf15 = buf14 del buf14 triton_poi_fused_relu_0[grid(1048576)](buf15, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf16 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf17 = buf16 del buf16 triton_poi_fused_relu_0[grid(1048576)](buf17, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf18 = extern_kernels.convolution(buf17, primals_11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf19 = buf18 del buf18 triton_poi_fused_relu_0[grid(1048576)](buf19, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf20 = extern_kernels.convolution(buf19, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf21 = buf20 del buf20 triton_poi_fused_relu_0[grid(1048576)](buf21, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf22 = extern_kernels.convolution(buf21, primals_13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf23 = buf22 del buf22 triton_poi_fused_relu_0[grid(1048576)](buf23, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf24 = extern_kernels.convolution(buf23, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf25 = buf24 del buf24 triton_poi_fused_relu_0[grid(1048576)](buf25, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf26 = extern_kernels.convolution(buf25, primals_15, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 64, 64, 64), 
(262144, 4096, 64, 1)) buf27 = buf26 del buf26 triton_poi_fused_relu_0[grid(1048576)](buf27, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf28 = extern_kernels.convolution(buf27, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf29 = buf28 del buf28 triton_poi_fused_relu_0[grid(1048576)](buf29, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf30 = extern_kernels.convolution(buf29, primals_17, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf31 = buf30 del buf30 triton_poi_fused_relu_0[grid(1048576)](buf31, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf32 = extern_kernels.convolution(buf31, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf33 = buf32 del buf32 triton_poi_fused_relu_0[grid(1048576)](buf33, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf34 = extern_kernels.convolution(buf33, primals_19, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf35 = buf34 del buf34 triton_poi_fused_relu_0[grid(1048576)](buf35, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf36 = extern_kernels.convolution(buf35, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf37 = buf36 del buf36 triton_poi_fused_relu_0[grid(1048576)](buf37, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf38 = extern_kernels.convolution(buf37, primals_21, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 1, 64, 64), (4096, 4096, 64, 1)) return (buf38, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, buf1, buf3, buf5, buf7, buf9, buf11, buf13, buf15, buf17, buf19, buf21, buf23, buf25, buf27, buf29, buf31, buf33, buf35, buf37) def load_param(model1_path, model2): dict_param1 = torch.load(model1_path) dict_param2 = dict(model2.named_parameters()) for name2 in dict_param2: if name2 in dict_param1: dict_param2[name2].data.copy_(dict_param1[name2].data) model2.load_state_dict(dict_param2) return model2 class VDSRNew(nn.Module): def __init__(self, model=False, fixed=False): super(VDSRNew, self).__init__() self.fixed = fixed self.conv1 = nn.Conv2d(1, 64, 3, 1, 1, bias=False) self.conv2 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv3 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv4 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv5 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv6 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv7 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv8 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv9 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv10 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv11 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv12 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv13 = nn.Conv2d(64, 64, 3, 
1, 1, bias=False) self.conv14 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv15 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv16 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv17 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv18 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv19 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv20 = nn.Conv2d(64, 1, 3, 1, 1, bias=False) self.relu = nn.ReLU(inplace=True) if model: load_param(model, self) if fixed: for param in self.parameters(): param.requires_grad = False def forward_stem(self, y): y = self.relu(self.conv1(y)) out1 = y y = self.relu(self.conv2(y)) out2 = y y = self.relu(self.conv3(y)) out3 = y y = self.relu(self.conv4(y)) out4 = y y = self.relu(self.conv5(y)) out5 = y y = self.relu(self.conv6(y)) out6 = y y = self.relu(self.conv7(y)) out7 = y y = self.relu(self.conv8(y)) out8 = y y = self.relu(self.conv9(y)) out9 = y y = self.relu(self.conv10(y)) out10 = y y = self.relu(self.conv11(y)) out11 = y y = self.relu(self.conv12(y)) out12 = y y = self.relu(self.conv13(y)) out13 = y y = self.relu(self.conv14(y)) out14 = y y = self.relu(self.conv15(y)) out15 = y y = self.relu(self.conv16(y)) out16 = y y = self.relu(self.conv17(y)) out17 = y y = self.relu(self.conv18(y)) out18 = y y = self.relu(self.conv19(y)) out19 = y y = self.conv20(y) return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10, out11, out12, out13, out14, out15, out16, out17, out18, out19, y) def forward_dense(self, y): y = self.relu(self.conv1(y)) out1 = y y = self.relu(self.conv2(y)) out2 = y y = self.relu(self.conv3(y)) out3 = y y = self.relu(self.conv4(y)) out4 = y y = self.relu(self.conv5(y)) out5 = y y = self.relu(self.conv6(y)) out6 = y y = self.relu(self.conv7(y)) out7 = y y = self.relu(self.conv8(y)) out8 = y y = self.relu(self.conv9(y)) out9 = y y = self.relu(self.conv10(y)) out10 = y y = self.relu(self.conv11(y)) out11 = y y = self.relu(self.conv12(y)) out12 = y y = self.relu(self.conv13(y)) out13 = y y = self.relu(self.conv14(y)) out14 = y y = self.relu(self.conv15(y)) out15 = y y = self.relu(self.conv16(y)) out16 = y y = self.relu(self.conv17(y)) out17 = y y = self.relu(self.conv18(y)) out18 = y y = self.relu(self.conv19(y)) out19 = y y = self.conv20(y) out20 = y return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10, out11, out12, out13, out14, out15, out16, out17, out18, out19, out20) def forward(self, input_0): primals_1 = self.conv1.weight primals_3 = self.conv2.weight primals_4 = self.conv3.weight primals_5 = self.conv4.weight primals_6 = self.conv5.weight primals_7 = self.conv6.weight primals_8 = self.conv7.weight primals_9 = self.conv8.weight primals_10 = self.conv9.weight primals_11 = self.conv10.weight primals_12 = self.conv11.weight primals_13 = self.conv12.weight primals_14 = self.conv13.weight primals_15 = self.conv14.weight primals_16 = self.conv15.weight primals_17 = self.conv16.weight primals_18 = self.conv17.weight primals_19 = self.conv18.weight primals_20 = self.conv19.weight primals_21 = self.conv20.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21]) return output[0]
MingSun-Tse/pytorch-vdsr
VDSR
false
5606
[ "MIT" ]
1
597bacb4ec7385c8cc6cdf91e26e64ef2e6808b7
https://github.com/MingSun-Tse/pytorch-vdsr/tree/597bacb4ec7385c8cc6cdf91e26e64ef2e6808b7
import torch import torch.nn as nn def load_param(model1_path, model2): dict_param1 = torch.load(model1_path) dict_param2 = dict(model2.named_parameters()) for name2 in dict_param2: if name2 in dict_param1: dict_param2[name2].data.copy_(dict_param1[name2].data) model2.load_state_dict(dict_param2) return model2 class Model(nn.Module): def __init__(self, model=False, fixed=False): super().__init__() self.fixed = fixed self.conv1 = nn.Conv2d(1, 64, 3, 1, 1, bias=False) self.conv2 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv3 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv4 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv5 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv6 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv7 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv8 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv9 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv10 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv11 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv12 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv13 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv14 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv15 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv16 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv17 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv18 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv19 = nn.Conv2d(64, 64, 3, 1, 1, bias=False) self.conv20 = nn.Conv2d(64, 1, 3, 1, 1, bias=False) self.relu = nn.ReLU(inplace=True) if model: load_param(model, self) if fixed: for param in self.parameters(): param.requires_grad = False def forward(self, y): y = self.relu(self.conv1(y)) y = self.relu(self.conv2(y)) y = self.relu(self.conv3(y)) y = self.relu(self.conv4(y)) y = self.relu(self.conv5(y)) y = self.relu(self.conv6(y)) y = self.relu(self.conv7(y)) y = self.relu(self.conv8(y)) y = self.relu(self.conv9(y)) y = self.relu(self.conv10(y)) y = self.relu(self.conv11(y)) y = self.relu(self.conv12(y)) y = self.relu(self.conv13(y)) y = self.relu(self.conv14(y)) y = self.relu(self.conv15(y)) y = self.relu(self.conv16(y)) y = self.relu(self.conv17(y)) y = self.relu(self.conv18(y)) y = self.relu(self.conv19(y)) y = self.conv20(y) return y def forward_stem(self, y): y = self.relu(self.conv1(y)) out1 = y y = self.relu(self.conv2(y)) out2 = y y = self.relu(self.conv3(y)) out3 = y y = self.relu(self.conv4(y)) out4 = y y = self.relu(self.conv5(y)) out5 = y y = self.relu(self.conv6(y)) out6 = y y = self.relu(self.conv7(y)) out7 = y y = self.relu(self.conv8(y)) out8 = y y = self.relu(self.conv9(y)) out9 = y y = self.relu(self.conv10(y)) out10 = y y = self.relu(self.conv11(y)) out11 = y y = self.relu(self.conv12(y)) out12 = y y = self.relu(self.conv13(y)) out13 = y y = self.relu(self.conv14(y)) out14 = y y = self.relu(self.conv15(y)) out15 = y y = self.relu(self.conv16(y)) out16 = y y = self.relu(self.conv17(y)) out17 = y y = self.relu(self.conv18(y)) out18 = y y = self.relu(self.conv19(y)) out19 = y y = self.conv20(y) return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10, out11, out12, out13, out14, out15, out16, out17, out18, out19, y) def forward_dense(self, y): y = self.relu(self. # ... truncated (>4000 chars) for memory efficiency
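Since this VDSR variant is bias-free and built entirely from 3x3 convolutions, its size is easy to verify by hand: one 1-to-64 head conv, eighteen 64-to-64 body convs, and one 64-to-1 tail. A minimal parameter-count sketch, assuming the eager Model from this record with no pretrained weights loaded:

import torch

net = Model()
n_params = sum(p.numel() for p in net.parameters())
# head (64*1*3*3) + 18 body convs (64*64*3*3 each) + tail (1*64*3*3) = 664,704
assert n_params == 64 * 1 * 9 + 18 * (64 * 64 * 9) + 1 * 64 * 9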
DepthwiseSeparableConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/53/c53345w2ogarycgzyrcothtqrrb7taubpprhokfthwhic4knqepc.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = 
(xindex // 5) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 1, 4), (4, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=(0,), groups=4, bias=None) assert_size_stride(buf0, (1, 4, 5), (20, 5, 1)) # Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 5), (20, 5, 1)) buf2 = reinterpret_tensor(buf1, (4, 5), (5, 1), 0); del buf1 # reuse buf3 = empty_strided_cuda((4, 5), (5, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf2, primals_4, buf3, 20, grid=grid(20), stream=stream0) del primals_4 return (buf2, primals_1, primals_3, reinterpret_tensor(primals_2, (1, 4, 4), (16, 4, 1), 0), buf0, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn class DepthwiseSeparableConv(nn.Module): def __init__(self, in_ch, out_ch, k, bias=True): super().__init__() self.depthwise_conv = nn.Conv1d(in_channels=in_ch, out_channels= in_ch, kernel_size=k, groups=in_ch, padding=k // 2, bias=False) self.pointwise_conv = nn.Conv1d(in_channels=in_ch, out_channels= out_ch, kernel_size=1, padding=0, bias=bias) def forward(self, x): return F.relu(self.pointwise_conv(self.depthwise_conv(x))) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_ch': 4, 'out_ch': 4, 'k': 4}]
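The depthwise/pointwise factorization above trades one dense convolution for two cheap ones. A minimal sketch of the payoff, assuming the DepthwiseSeparableConv class defined above (the plain nn.Conv1d baseline is introduced here only for the parameter-count comparison):

import torch
import torch.nn as nn

def n_params(m):
    # Total scalar parameters in a module.
    return sum(p.numel() for p in m.parameters())

sep = DepthwiseSeparableConv(in_ch=4, out_ch=4, k=4)
full = nn.Conv1d(4, 4, kernel_size=4, padding=2)

print(n_params(sep), n_params(full))  # 36 vs. 68 at these sizes
x = torch.rand(4, 4)                  # (channels, length), as in get_inputs()
print(sep(x).shape)                   # torch.Size([4, 5]): even k with padding=k//2 grows length by 1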
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 5 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 1, 4), (4, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=(0,), groups=4, bias=None) assert_size_stride(buf0, (1, 4, 5), (20, 5, 1)) buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 5), (20, 5, 1)) buf2 = reinterpret_tensor(buf1, (4, 5), (5, 1), 0) del buf1 buf3 = empty_strided_cuda((4, 5), (5, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(20)](buf2, primals_4, buf3, 20, XBLOCK=32, num_warps=1, num_stages=1) del primals_4 return buf2, primals_1, primals_3, reinterpret_tensor(primals_2, (1, 4, 4), (16, 4, 1), 0), buf0, buf3 class DepthwiseSeparableConvNew(nn.Module): def __init__(self, in_ch, out_ch, k, bias=True): super().__init__() self.depthwise_conv = nn.Conv1d(in_channels=in_ch, out_channels= in_ch, kernel_size=k, groups=in_ch, padding=k // 2, bias=False) self.pointwise_conv = nn.Conv1d(in_channels=in_ch, out_channels= out_ch, kernel_size=1, padding=0, bias=bias) def forward(self, input_0): primals_1 = self.depthwise_conv.weight primals_3 = self.pointwise_conv.weight primals_4 = self.pointwise_conv.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
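The fused kernel above packs three elementwise steps into one pass: the pointwise conv's bias add, the ReLU, and the <= 0 mask that ReLU's backward needs. A plain-PyTorch restatement of the same arithmetic (a sketch; the tensor names stand in for buf2 and primals_4):

import torch

out = torch.rand(4, 5)      # stands in for buf2: 4 channel rows x 5 positions
bias = torch.rand(4)        # stands in for primals_4
pre = out + bias[:, None]   # x1 = xindex // 5 broadcasts one bias per channel row
post = torch.relu(pre)      # written back in place (in_out_ptr0)
mask = post <= 0            # written to buf3 for aten.threshold_backward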
MobtgZhang/MWMLNet
DepthwiseSeparableConv
false
5,607
[ "MIT" ]
1
125bb39935916b6b4be505c51cb6a04eb49b96d0
https://github.com/MobtgZhang/MWMLNet/tree/125bb39935916b6b4be505c51cb6a04eb49b96d0
import torch import torch.nn.functional as F import torch.nn as nn class Model(nn.Module): def __init__(self, in_ch, out_ch, k, bias=True): super().__init__() self.depthwise_conv = nn.Conv1d(in_channels=in_ch, out_channels= in_ch, kernel_size=k, groups=in_ch, padding=k // 2, bias=False) self.pointwise_conv = nn.Conv1d(in_channels=in_ch, out_channels= out_ch, kernel_size=1, padding=0, bias=bias) def forward(self, x): return F.relu(self.pointwise_conv(self.depthwise_conv(x))) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [4, 4, 4]
SFU
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/3l/c3lo77c7wjxasxrhtr6wesb72ods2d2rxnxhbfieun7j2wukm3wn.py # Topologically Sorted Source Nodes: [r_f], Original ATen: [aten.cat] # Source node to ATen node mapping: # r_f => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 2), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/4h/c4hvd3vhapir7uardh64cxnc57lcta2i2xqwszrnyxam4nh7uanb.py # Topologically Sorted Source Nodes: [r, g, mul, sub, mul_1, o], Original ATen: [aten.tanh, aten.sigmoid, aten.mul, aten.rsub, aten.add] # Source node to ATen node mapping: # g => sigmoid # mul => mul # mul_1 => mul_1 # o => add # r => tanh # sub => sub # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) triton_poi_fused_add_mul_rsub_sigmoid_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp7 = tl.load(in_ptr2 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = libdevice.tanh(tmp2) tmp4 = tmp1 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp8 = tmp6 * tmp7 tmp9 = tmp4 + tmp8 tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, 
primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 8), (8, 1)) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [r_f], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 128, grid=grid(128), stream=stream0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2) del primals_5 del primals_6 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [r, g, mul, sub, mul_1, o], Original ATen: [aten.tanh, aten.sigmoid, aten.mul, aten.rsub, aten.add] triton_poi_fused_add_mul_rsub_sigmoid_tanh_1.run(buf2, buf1, primals_1, buf3, 64, grid=grid(64), stream=stream0) return (buf3, primals_1, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), buf1, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class SFU(nn.Module): """Semantic Fusion Unit The output vector is expected to not only retrieve correlative information from the fusion vectors, but also remain partly unchanged from the input vector """ def __init__(self, input_size, fusion_size): super(SFU, self).__init__() self.linear_r = nn.Linear(input_size + fusion_size, input_size) self.linear_g = nn.Linear(input_size + fusion_size, input_size) def forward(self, x, fusions): r_f = torch.cat([x, fusions], 2) r = torch.tanh(self.linear_r(r_f)) g = torch.sigmoid(self.linear_g(r_f)) o = g * r + (1 - g) * x return o def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'fusion_size': 4}]
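The gate g interpolates elementwise between the fused candidate r and the untouched input x. A quick sketch, assuming the SFU class defined above:

import torch

torch.manual_seed(0)
sfu = SFU(input_size=4, fusion_size=4)
x, fusions = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
o = sfu(x, fusions)
print(o.shape)  # torch.Size([4, 4, 4]): same shape as x

# The forward pass written out by hand matches the module:
r_f = torch.cat([x, fusions], 2)
r = torch.tanh(sfu.linear_r(r_f))
g = torch.sigmoid(sfu.linear_g(r_f))
assert torch.allclose(o, g * r + (1 - g) * x)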
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp7 = tl.load(in_ptr2 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = libdevice.tanh(tmp2) tmp4 = tmp1 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp8 = tmp6 * tmp7 tmp9 = tmp4 + tmp8 tl.store(out_ptr0 + x0, tmp9, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 8), (8, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_1, primals_2, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (16, 8), ( 8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(buf0, (16, 8), ( 8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2) del primals_5 del primals_6 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_rsub_sigmoid_tanh_1[grid(64)](buf2, buf1, primals_1, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf3, primals_1, reinterpret_tensor(buf0, (16, 8), (8, 1), 0 ), buf1, buf2 class SFUNew(nn.Module): """Semantic Fusion Unit The output vector is expected to not only retrieve correlative information from the fusion vectors, but also remain partly unchanged from the input vector """ def __init__(self, input_size, fusion_size): super(SFUNew, self).__init__() self.linear_r = nn.Linear(input_size + fusion_size, input_size) self.linear_g = nn.Linear(input_size + fusion_size, input_size) def forward(self, input_0, 
input_1): primals_3 = self.linear_r.weight primals_4 = self.linear_r.bias primals_5 = self.linear_g.weight primals_6 = self.linear_g.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
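For reference, triton_poi_fused_cat_0 above is just torch.cat along the last dimension written as an index branch: output column x0 < 4 reads the first input, otherwise the second input at column x0 - 4. The same thing in plain PyTorch (a sketch):

import torch

a, b = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
out = torch.empty(4, 4, 8)
out[..., :4] = a          # the tmp4 = x0 < 4 branch
out[..., 4:] = b          # the tmp6 = x0 >= 4 branch, offset by -4
assert torch.equal(out, torch.cat([a, b], dim=2))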
MobtgZhang/MWMLNet
SFU
false
5,608
[ "MIT" ]
1
125bb39935916b6b4be505c51cb6a04eb49b96d0
https://github.com/MobtgZhang/MWMLNet/tree/125bb39935916b6b4be505c51cb6a04eb49b96d0
import torch import torch.nn as nn class Model(nn.Module): """Semantic Fusion Unit The output vector is expected to not only retrieve correlative information from the fusion vectors, but also remain partly unchanged from the input vector """ def __init__(self, input_size, fusion_size): super().__init__() self.linear_r = nn.Linear(input_size + fusion_size, input_size) self.linear_g = nn.Linear(input_size + fusion_size, input_size) def forward(self, x, fusions): r_f = torch.cat([x, fusions], 2) r = torch.tanh(self.linear_r(r_f)) g = torch.sigmoid(self.linear_g(r_f)) o = g * r + (1 - g) * x return o def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [4, 4]
Attention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/nu/cnuc7ivckuuly7yn2763pwt3sw72jd6vuwpeeu4sfespm5iz7fq4.py # Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax] # Source node to ATen node mapping: # p_attn => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/fj/cfjl47pvhwbpfbvh6rfehwy5ijxc5p3zgkld2lwf3mw5bl6pbkak.py # Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax] # Source node to ATen node mapping: # p_attn => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = 
tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0) buf3 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf3) del arg2_1 return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn.functional as F import torch.nn as nn class Attention(nn.Module): """ Compute 'Scaled Dot Product Attention' """ def forward(self, query, key, value, mask=None, dropout=None): scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(query .size(-1)) if mask is not None: scores = scores.masked_fill(mask == 0, -1000000000.0) p_attn = F.softmax(scores, dim=-1) if dropout is not None: p_attn = dropout(p_attn) return torch.matmul(p_attn, value), p_attn def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
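Since PyTorch 2.0 the same computation is exposed as a fused op, torch.nn.functional.scaled_dot_product_attention. A small sanity check against it, assuming the Attention class defined above (agreement is up to floating-point tolerance, since the fused path may reorder the arithmetic):

import torch
import torch.nn.functional as F

q, k, v = (torch.rand(4, 4, 4, 4) for _ in range(3))
out, p_attn = Attention()(q, k, v)
ref = F.scaled_dot_product_attention(q, k, v)
print(torch.allclose(out, ref, atol=1e-6))  # True
print(p_attn.sum(-1))                       # attention rows sum to 1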
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf3 ) del arg2_1 return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2 class AttentionNew(nn.Module): """ Compute 'Scaled Dot Product Attention' """ def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, 
arg1_1, arg2_1]) return output[0], output[1]
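The two generated kernels implement the standard numerically stable softmax in two passes over rows of length 4; the 0.5 factor folds the 1/sqrt(d) = 1/2 scaling into the exponent (the * 1.0 is a leftover no-op scale). The same math in plain PyTorch (a sketch):

import torch

scores = torch.rand(4, 4, 4, 4)
shifted = scores - scores.amax(dim=-1, keepdim=True)   # kernel 0: subtract the row max
expd = torch.exp(shifted * 0.5)                        # kernel 0: exp with the folded-in scale
p_attn = expd / expd.sum(dim=-1, keepdim=True)         # kernel 1: normalize
assert torch.allclose(p_attn, torch.softmax(scores / 2.0, dim=-1))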
MobtgZhang/MWMLNet
Attention
false
5,609
[ "MIT" ]
1
125bb39935916b6b4be505c51cb6a04eb49b96d0
https://github.com/MobtgZhang/MWMLNet/tree/125bb39935916b6b4be505c51cb6a04eb49b96d0
import math import torch import torch.nn.functional as F import torch.nn as nn class Model(nn.Module): """ Compute 'Scaled Dot Product Attention' """ def forward(self, query, key, value, mask=None, dropout=None): scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(query .size(-1)) if mask is not None: scores = scores.masked_fill(mask == 0, -1000000000.0) p_attn = F.softmax(scores, dim=-1) if dropout is not None: p_attn = dropout(p_attn) return torch.matmul(p_attn, value), p_attn def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return []
Sharpen_Block
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/hi/chieqt2p7y3xojubgmoetn3r5tmgtaq7aft5irb7n2uvwm6zuiqy.py # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d] # Source node to ATen node mapping: # pad => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_3]), kwargs = {}) triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) % 6 x2 = (xindex // 36) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(arg1_1, (1, 1, 3, 3), (9, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 6, 6), (36, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_reflection_pad2d_0.run(arg0_1, buf0, 144, grid=grid(144), stream=stream0) del arg0_1 # Topologically Sorted Source Nodes: [pad, conv2d], Original ATen: [aten.reflection_pad2d, aten.convolution] buf1 = extern_kernels.convolution(buf0, arg1_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1)) del arg1_1 del buf0 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
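The index expression inside the padding kernel is reflection in closed form: for pad 1 over a size-4 axis, 3 - abs(-3 + abs(-1 + i)) maps a padded coordinate i in [0, 6) back to a source coordinate in [0, 4). A sketch of that mapping:

def reflect(i):
    # Reflected source index for padded coordinate i (pad=1, size=4),
    # mirroring the "abs(-3 + abs(-1 + x0))" arithmetic in the kernel.
    return 3 - abs(-3 + abs(-1 + i))

print([reflect(i) for i in range(6)])  # [1, 0, 1, 2, 3, 2]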
import torch import numpy as np import torch.nn as nn class Sharpen_Block(nn.Module): def __init__(self): super(Sharpen_Block, self).__init__() self.pad = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv = nn.Conv2d(1, 1, 3, 1, 0, bias=False) self.conv.weight = nn.Parameter(torch.from_numpy(np.array([[[[0, - 0.4, 0], [0, 2.6, 0], [0, -0.4, 0]]]])).float()) self.conv.weight.requires_grad = False def forward(self, x): return self.conv(self.pad(x)) def get_inputs(): return [torch.rand([4, 1, 4, 4])] def get_init_inputs(): return [[], {}]
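The fixed kernel mixes only along the vertical axis, and its entries sum to 2.6 - 0.4 - 0.4 = 1.8, so a constant image comes out uniformly scaled by 1.8 (reflection padding keeps a constant image constant). A quick property check, assuming the Sharpen_Block class defined above:

import torch

blk = Sharpen_Block()
flat = torch.ones(1, 1, 4, 4)
print(blk(flat))          # every entry is 1.8
x = torch.rand(1, 1, 4, 4)
print(blk(x).shape)       # torch.Size([1, 1, 4, 4]): padding preserves the size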
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(arg1_1, (1, 1, 3, 3), (9, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 6, 6), (36, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(144)](arg0_1, buf0, 144, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf1 = extern_kernels.convolution(buf0, arg1_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1)) del arg1_1 del buf0 return buf1, class Sharpen_BlockNew(nn.Module): def __init__(self): super(Sharpen_BlockNew, self).__init__() self.pad = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv = nn.Conv2d(1, 1, 3, 1, 0, bias=False) self.conv.weight = nn.Parameter(torch.from_numpy(np.array([[[[0, - 0.4, 0], [0, 2.6, 0], [0, -0.4, 0]]]])).float()) self.conv.weight.requires_grad = False def forward(self, input_0): arg1_1 = self.conv.weight arg0_1 = input_0 output = call([arg0_1, arg1_1]) return output[0]
MingSun-Tse/pytorch-vdsr
Sharpen_Block
false
5,610
[ "MIT" ]
1
597bacb4ec7385c8cc6cdf91e26e64ef2e6808b7
https://github.com/MingSun-Tse/pytorch-vdsr/tree/597bacb4ec7385c8cc6cdf91e26e64ef2e6808b7
import torch import numpy as np import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() self.pad = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv = nn.Conv2d(1, 1, 3, 1, 0, bias=False) self.conv.weight = nn.Parameter(torch.from_numpy(np.array([[[[0, - 0.4, 0], [0, 2.6, 0], [0, -0.4, 0]]]])).float()) self.conv.weight.requires_grad = False def forward(self, x): return self.conv(self.pad(x)) def get_inputs(): return [torch.rand([4, 1, 4, 4])] def get_init_inputs(): return []
SmallVDSR_F8
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/of/cofqdm26j473y3qugzsd27huz6mlmh4piozcssdqtsv6l7udq3bj.py # Topologically Sorted Source Nodes: [y], Original ATen: [aten.relu] # Source node to ATen node mapping: # y => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + (x0), tmp2, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def 
call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21 = args args.clear() assert_size_stride(primals_1, (8, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_3, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_4, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_5, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_6, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_7, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_8, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_9, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_10, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_11, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_12, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_13, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_14, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_15, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_16, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_17, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_18, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_19, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_20, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_21, (1, 8, 3, 3), (72, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [y], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf3, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf5, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [y_3], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf7, 131072, grid=grid(131072), 
stream=stream0) # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [y_4], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf9, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [y_5], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf11, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [y_6], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf13, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf13, primals_9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf15 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [y_7], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf15, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution] buf16 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf17 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [y_8], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf17, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution] buf18 = extern_kernels.convolution(buf17, primals_11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf19 = buf18; del buf18 # reuse # Topologically Sorted Source Nodes: [y_9], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf19, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution] buf20 = extern_kernels.convolution(buf19, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf21 = buf20; del buf20 # reuse # Topologically Sorted Source Nodes: [y_10], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf21, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution] buf22 = 
extern_kernels.convolution(buf21, primals_13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf23 = buf22; del buf22 # reuse # Topologically Sorted Source Nodes: [y_11], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf23, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution] buf24 = extern_kernels.convolution(buf23, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf25 = buf24; del buf24 # reuse # Topologically Sorted Source Nodes: [y_12], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf25, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution] buf26 = extern_kernels.convolution(buf25, primals_15, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf27 = buf26; del buf26 # reuse # Topologically Sorted Source Nodes: [y_13], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf27, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution] buf28 = extern_kernels.convolution(buf27, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf29 = buf28; del buf28 # reuse # Topologically Sorted Source Nodes: [y_14], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf29, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_15], Original ATen: [aten.convolution] buf30 = extern_kernels.convolution(buf29, primals_17, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf31 = buf30; del buf30 # reuse # Topologically Sorted Source Nodes: [y_15], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf31, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_16], Original ATen: [aten.convolution] buf32 = extern_kernels.convolution(buf31, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf33 = buf32; del buf32 # reuse # Topologically Sorted Source Nodes: [y_16], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf33, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_17], Original ATen: [aten.convolution] buf34 = extern_kernels.convolution(buf33, primals_19, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf35 = buf34; del buf34 # reuse # Topologically Sorted Source Nodes: [y_17], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf35, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_18], Original ATen: [aten.convolution] buf36 = extern_kernels.convolution(buf35, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 
1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf37 = buf36; del buf36 # reuse # Topologically Sorted Source Nodes: [y_18], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf37, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [y_19], Original ATen: [aten.convolution] buf38 = extern_kernels.convolution(buf37, primals_21, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 1, 64, 64), (4096, 4096, 64, 1)) return (buf38, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, buf1, buf3, buf5, buf7, buf9, buf11, buf13, buf15, buf17, buf19, buf21, buf23, buf25, buf27, buf29, buf31, buf33, buf35, buf37, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((8, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((1, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', 
benchmark_compiled_module)
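# --- Illustrative direct invocation (added; a hypothetical sketch, not part of the compiled export) ---
# benchmark_compiled_module() above times call() on rand_strided tensors that
# satisfy the asserted shapes and strides. The same wiring written out by hand
# with contiguous torch.randn tensors (CUDA assumed; `call` is the wrapper
# defined earlier in this module, and _invoke_call_with_random_params is a
# name introduced here for illustration):
import torch

def _invoke_call_with_random_params():
    args = [torch.randn(8, 1, 3, 3, device='cuda')]                      # conv1 weight
    args += [torch.randn(4, 1, 64, 64, device='cuda')]                   # network input
    args += [torch.randn(8, 8, 3, 3, device='cuda') for _ in range(18)]  # conv2..conv19 weights
    args += [torch.randn(1, 8, 3, 3, device='cuda')]                     # conv20 weight
    return call(args)[0]                                                 # (4, 1, 64, 64) residual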
import torch import torch.nn as nn def load_param(model1_path, model2): dict_param1 = torch.load(model1_path) dict_param2 = dict(model2.named_parameters()) for name2 in dict_param2: if name2 in dict_param1: dict_param2[name2].data.copy_(dict_param1[name2].data) model2.load_state_dict(dict_param2) return model2 class SmallVDSR_F8(nn.Module): def __init__(self, model=False, fixed=False): super(SmallVDSR_F8, self).__init__() self.fixed = fixed self.conv1 = nn.Conv2d(1, 8, 3, 1, 1, bias=False) self.conv2 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv3 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv4 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv5 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv6 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv7 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv8 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv9 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv10 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv11 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv12 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv13 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv14 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv15 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv16 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv17 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv18 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv19 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv20 = nn.Conv2d(8, 1, 3, 1, 1, bias=False) self.relu = nn.ReLU() if model: load_param(model, self) if fixed: for param in self.parameters(): param.requires_grad = False def forward_stem(self, y): y = self.relu(self.conv1(y)) out1 = y y = self.relu(self.conv2(y)) out2 = y y = self.relu(self.conv3(y)) out3 = y y = self.relu(self.conv4(y)) out4 = y y = self.relu(self.conv5(y)) out5 = y y = self.relu(self.conv6(y)) out6 = y y = self.relu(self.conv7(y)) out7 = y y = self.relu(self.conv8(y)) out8 = y y = self.relu(self.conv9(y)) out9 = y y = self.relu(self.conv10(y)) out10 = y y = self.relu(self.conv11(y)) out11 = y y = self.relu(self.conv12(y)) out12 = y y = self.relu(self.conv13(y)) out13 = y y = self.relu(self.conv14(y)) out14 = y y = self.relu(self.conv15(y)) out15 = y y = self.relu(self.conv16(y)) out16 = y y = self.relu(self.conv17(y)) out17 = y y = self.relu(self.conv18(y)) out18 = y y = self.relu(self.conv19(y)) out19 = y y = self.conv20(y) return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10, out11, out12, out13, out14, out15, out16, out17, out18, out19, y) def forward_dense(self, y): y = self.relu(self.conv1(y)) out1 = y y = self.relu(self.conv2(y)) out2 = y y = self.relu(self.conv3(y)) out3 = y y = self.relu(self.conv4(y)) out4 = y y = self.relu(self.conv5(y)) out5 = y y = self.relu(self.conv6(y)) out6 = y y = self.relu(self.conv7(y)) out7 = y y = self.relu(self.conv8(y)) out8 = y y = self.relu(self.conv9(y)) out9 = y y = self.relu(self.conv10(y)) out10 = y y = self.relu(self.conv11(y)) out11 = y y = self.relu(self.conv12(y)) out12 = y y = self.relu(self.conv13(y)) out13 = y y = self.relu(self.conv14(y)) out14 = y y = self.relu(self.conv15(y)) out15 = y y = self.relu(self.conv16(y)) out16 = y y = self.relu(self.conv17(y)) out17 = y y = self.relu(self.conv18(y)) out18 = y y = self.relu(self.conv19(y)) out19 = y y = self.conv20(y) out20 = y return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10, out11, out12, out13, out14, out15, out16, out17, out18, out19, out20) def forward(self, y): y = self.relu(self.conv1(y)) y = self.relu(self.conv2(y)) y = self.relu(self.conv3(y)) y 
= self.relu(self.conv4(y)) y = self.relu(self.conv5(y)) y = self.relu(self.conv6(y)) y = self.relu(self.conv7(y)) y = self.relu(self.conv8(y)) y = self.relu(self.conv9(y)) y = self.relu(self.conv10(y)) y = self.relu(self.conv11(y)) y = self.relu(self.conv12(y)) y = self.relu(self.conv13(y)) y = self.relu(self.conv14(y)) y = self.relu(self.conv15(y)) y = self.relu(self.conv16(y)) y = self.relu(self.conv17(y)) y = self.relu(self.conv18(y)) y = self.relu(self.conv19(y)) y = self.conv20(y) return y def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
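# --- Illustrative eager-mode run (added; a minimal sketch built on the helpers above) ---
# get_inputs() yields the (4, 1, 64, 64) batch the harness expects. Because
# every conv uses kernel 3 with padding 1, the spatial size is preserved, and
# conv20 maps the 8 feature channels back to 1, so the output matches the
# input shape. _demo_small_vdsr_f8 is a name introduced here for illustration.
import torch

def _demo_small_vdsr_f8():
    model = SmallVDSR_F8()
    x, = get_inputs()              # (4, 1, 64, 64)
    with torch.no_grad():
        y = model(x)               # predicted residual
    assert y.shape == x.shape      # 'same' padding keeps H and W fixed
    return y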
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21) = args args.clear() assert_size_stride(primals_1, (8, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_3, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_4, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_5, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_6, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_7, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_8, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_9, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_10, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_11, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_12, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_13, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_14, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_15, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_16, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_17, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_18, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_19, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_20, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_21, (1, 8, 3, 3), (72, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(131072)](buf1, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_relu_0[grid(131072)](buf3, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf5 = buf4 del buf4 triton_poi_fused_relu_0[grid(131072)](buf5, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 
8, 64, 64), (32768, 4096, 64, 1)) buf7 = buf6 del buf6 triton_poi_fused_relu_0[grid(131072)](buf7, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf9 = buf8 del buf8 triton_poi_fused_relu_0[grid(131072)](buf9, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf10 = extern_kernels.convolution(buf9, primals_7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf11 = buf10 del buf10 triton_poi_fused_relu_0[grid(131072)](buf11, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf12 = extern_kernels.convolution(buf11, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf13 = buf12 del buf12 triton_poi_fused_relu_0[grid(131072)](buf13, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf14 = extern_kernels.convolution(buf13, primals_9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf15 = buf14 del buf14 triton_poi_fused_relu_0[grid(131072)](buf15, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf16 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf17 = buf16 del buf16 triton_poi_fused_relu_0[grid(131072)](buf17, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf18 = extern_kernels.convolution(buf17, primals_11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf19 = buf18 del buf18 triton_poi_fused_relu_0[grid(131072)](buf19, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf20 = extern_kernels.convolution(buf19, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf21 = buf20 del buf20 triton_poi_fused_relu_0[grid(131072)](buf21, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf22 = extern_kernels.convolution(buf21, primals_13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf23 = buf22 del buf22 triton_poi_fused_relu_0[grid(131072)](buf23, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf24 = extern_kernels.convolution(buf23, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf25 = buf24 del buf24 triton_poi_fused_relu_0[grid(131072)](buf25, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf26 = extern_kernels.convolution(buf25, primals_15, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf27 = buf26 del buf26 triton_poi_fused_relu_0[grid(131072)](buf27, 131072, XBLOCK=512, 
num_warps=8, num_stages=1) buf28 = extern_kernels.convolution(buf27, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf29 = buf28 del buf28 triton_poi_fused_relu_0[grid(131072)](buf29, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf30 = extern_kernels.convolution(buf29, primals_17, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf31 = buf30 del buf30 triton_poi_fused_relu_0[grid(131072)](buf31, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf32 = extern_kernels.convolution(buf31, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf33 = buf32 del buf32 triton_poi_fused_relu_0[grid(131072)](buf33, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf34 = extern_kernels.convolution(buf33, primals_19, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf35 = buf34 del buf34 triton_poi_fused_relu_0[grid(131072)](buf35, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf36 = extern_kernels.convolution(buf35, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 8, 64, 64), (32768, 4096, 64, 1)) buf37 = buf36 del buf36 triton_poi_fused_relu_0[grid(131072)](buf37, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf38 = extern_kernels.convolution(buf37, primals_21, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 1, 64, 64), (4096, 4096, 64, 1)) return (buf38, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, buf1, buf3, buf5, buf7, buf9, buf11, buf13, buf15, buf17, buf19, buf21, buf23, buf25, buf27, buf29, buf31, buf33, buf35, buf37) def load_param(model1_path, model2): dict_param1 = torch.load(model1_path) dict_param2 = dict(model2.named_parameters()) for name2 in dict_param2: if name2 in dict_param1: dict_param2[name2].data.copy_(dict_param1[name2].data) model2.load_state_dict(dict_param2) return model2 class SmallVDSR_F8New(nn.Module): def __init__(self, model=False, fixed=False): super(SmallVDSR_F8New, self).__init__() self.fixed = fixed self.conv1 = nn.Conv2d(1, 8, 3, 1, 1, bias=False) self.conv2 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv3 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv4 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv5 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv6 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv7 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv8 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv9 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv10 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv11 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv12 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv13 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv14 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv15 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv16 = 
nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv17 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv18 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv19 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv20 = nn.Conv2d(8, 1, 3, 1, 1, bias=False) self.relu = nn.ReLU() if model: load_param(model, self) if fixed: for param in self.parameters(): param.requires_grad = False def forward_stem(self, y): y = self.relu(self.conv1(y)) out1 = y y = self.relu(self.conv2(y)) out2 = y y = self.relu(self.conv3(y)) out3 = y y = self.relu(self.conv4(y)) out4 = y y = self.relu(self.conv5(y)) out5 = y y = self.relu(self.conv6(y)) out6 = y y = self.relu(self.conv7(y)) out7 = y y = self.relu(self.conv8(y)) out8 = y y = self.relu(self.conv9(y)) out9 = y y = self.relu(self.conv10(y)) out10 = y y = self.relu(self.conv11(y)) out11 = y y = self.relu(self.conv12(y)) out12 = y y = self.relu(self.conv13(y)) out13 = y y = self.relu(self.conv14(y)) out14 = y y = self.relu(self.conv15(y)) out15 = y y = self.relu(self.conv16(y)) out16 = y y = self.relu(self.conv17(y)) out17 = y y = self.relu(self.conv18(y)) out18 = y y = self.relu(self.conv19(y)) out19 = y y = self.conv20(y) return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10, out11, out12, out13, out14, out15, out16, out17, out18, out19, y) def forward_dense(self, y): y = self.relu(self.conv1(y)) out1 = y y = self.relu(self.conv2(y)) out2 = y y = self.relu(self.conv3(y)) out3 = y y = self.relu(self.conv4(y)) out4 = y y = self.relu(self.conv5(y)) out5 = y y = self.relu(self.conv6(y)) out6 = y y = self.relu(self.conv7(y)) out7 = y y = self.relu(self.conv8(y)) out8 = y y = self.relu(self.conv9(y)) out9 = y y = self.relu(self.conv10(y)) out10 = y y = self.relu(self.conv11(y)) out11 = y y = self.relu(self.conv12(y)) out12 = y y = self.relu(self.conv13(y)) out13 = y y = self.relu(self.conv14(y)) out14 = y y = self.relu(self.conv15(y)) out15 = y y = self.relu(self.conv16(y)) out16 = y y = self.relu(self.conv17(y)) out17 = y y = self.relu(self.conv18(y)) out18 = y y = self.relu(self.conv19(y)) out19 = y y = self.conv20(y) out20 = y return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10, out11, out12, out13, out14, out15, out16, out17, out18, out19, out20) def forward(self, input_0): primals_1 = self.conv1.weight primals_3 = self.conv2.weight primals_4 = self.conv3.weight primals_5 = self.conv4.weight primals_6 = self.conv5.weight primals_7 = self.conv6.weight primals_8 = self.conv7.weight primals_9 = self.conv8.weight primals_10 = self.conv9.weight primals_11 = self.conv10.weight primals_12 = self.conv11.weight primals_13 = self.conv12.weight primals_14 = self.conv13.weight primals_15 = self.conv14.weight primals_16 = self.conv15.weight primals_17 = self.conv16.weight primals_18 = self.conv17.weight primals_19 = self.conv18.weight primals_20 = self.conv19.weight primals_21 = self.conv20.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21]) return output[0]
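# --- Illustrative use of the compiled wrapper (added; a sketch assuming a CUDA device) ---
# SmallVDSR_F8New.forward gathers the 21 conv weights plus the input and hands
# them to call(), which alternates extern convolutions with the fused in-place
# ReLU kernel triton_poi_fused_relu_0. _demo_compiled_small_vdsr is a name
# introduced here for illustration.
import torch

def _demo_compiled_small_vdsr():
    model = SmallVDSR_F8New().cuda()
    x = torch.rand(4, 1, 64, 64, device='cuda')
    y = model(x)                   # dispatches through call()
    assert y.shape == (4, 1, 64, 64)
    return y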
MingSun-Tse/pytorch-vdsr
SmallVDSR_F8
false
5611
[ "MIT" ]
1
597bacb4ec7385c8cc6cdf91e26e64ef2e6808b7
https://github.com/MingSun-Tse/pytorch-vdsr/tree/597bacb4ec7385c8cc6cdf91e26e64ef2e6808b7
import torch import torch.nn as nn def load_param(model1_path, model2): dict_param1 = torch.load(model1_path) dict_param2 = dict(model2.named_parameters()) for name2 in dict_param2: if name2 in dict_param1: dict_param2[name2].data.copy_(dict_param1[name2].data) model2.load_state_dict(dict_param2) return model2 class Model(nn.Module): def __init__(self, model=False, fixed=False): super().__init__() self.fixed = fixed self.conv1 = nn.Conv2d(1, 8, 3, 1, 1, bias=False) self.conv2 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv3 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv4 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv5 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv6 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv7 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv8 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv9 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv10 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv11 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv12 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv13 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv14 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv15 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv16 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv17 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv18 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv19 = nn.Conv2d(8, 8, 3, 1, 1, bias=False) self.conv20 = nn.Conv2d(8, 1, 3, 1, 1, bias=False) self.relu = nn.ReLU() if model: load_param(model, self) if fixed: for param in self.parameters(): param.requires_grad = False def forward_stem(self, y): y = self.relu(self.conv1(y)) out1 = y y = self.relu(self.conv2(y)) out2 = y y = self.relu(self.conv3(y)) out3 = y y = self.relu(self.conv4(y)) out4 = y y = self.relu(self.conv5(y)) out5 = y y = self.relu(self.conv6(y)) out6 = y y = self.relu(self.conv7(y)) out7 = y y = self.relu(self.conv8(y)) out8 = y y = self.relu(self.conv9(y)) out9 = y y = self.relu(self.conv10(y)) out10 = y y = self.relu(self.conv11(y)) out11 = y y = self.relu(self.conv12(y)) out12 = y y = self.relu(self.conv13(y)) out13 = y y = self.relu(self.conv14(y)) out14 = y y = self.relu(self.conv15(y)) out15 = y y = self.relu(self.conv16(y)) out16 = y y = self.relu(self.conv17(y)) out17 = y y = self.relu(self.conv18(y)) out18 = y y = self.relu(self.conv19(y)) out19 = y y = self.conv20(y) return (out1, out2, out3, out4, out5, out6, out7, out8, out9, out10, out11, out12, out13, out14, out15, out16, out17, out18, out19, y) def forward_dense(self, y): y = self.relu(self.conv1(y)) out1 = y y = self.relu(self.conv2(y)) out2 = y y = self.relu(self.conv3(y)) out3 = y y = self.relu(self.conv4(y)) out4 = y y = self.relu(self.conv5(y)) out5 = y y = self.relu(self.conv6(y)) out6 = y y = self.relu(self.conv7(y)) out7 = y y = self.relu(self.conv8(y)) out8 = y y = self.relu(self.conv9(y)) out9 = y y = self.relu(self.conv10(y)) out10 = y y = self.relu(self.conv11(y)) out11 = y y = self.relu(self.conv12(y)) out12 = y y = self.relu(self.conv13(y)) out13 = y y = self.relu(self.conv14(y)) out14 = y y = self.relu(self.conv15(y)) out15 = y y = self.relu(self.conv16(y)) # ... truncated (>4000 chars) for memory efficiency
Dense_block
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/o4/co4zyxcl2qkqwco2hpqjmusaq55kf7f6hyk5tkf5vs6buom4p47w.py # Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # leaky_relu => gt, mul, where # Graph fragment: # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.2), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {}) triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 256, grid=grid(256), stream=stream0) del buf0 del primals_2 return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
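# --- Eager-mode reference for the fused kernel above (added; a sketch) ---
# triton_poi_fused_leaky_relu_0 adds the Linear bias to the matmul output and
# applies LeakyReLU(0.2) in a single pass, also storing the (z > 0) bool mask
# (out_ptr0) that the backward graph reuses. The same arithmetic in plain
# PyTorch; leaky_relu_fused_reference is a name introduced here:
import torch

def leaky_relu_fused_reference(mm_out, bias, negative_slope=0.2):
    z = mm_out + bias                                 # bias broadcast over the last dim
    mask = z > 0                                      # written to out_ptr0 as torch.bool
    out = torch.where(mask, z, z * negative_slope)    # written to out_ptr1
    return mask, out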
import torch
import torch.nn as nn


class Dense_block(nn.Module):
    """ This is the initial dense block as in the paper """

    def __init__(self, in_channels, out_channels):
        super(Dense_block, self).__init__()
        self.Dense = torch.nn.Linear(in_channels, out_channels)
        # nn.init.xavier_uniform is deprecated; use the in-place variant on
        # the parameter directly instead of mutating .data.
        nn.init.xavier_uniform_(self.Dense.weight, gain=1.0)
        self.activation = torch.nn.LeakyReLU(0.2)

    def forward(self, x):
        return self.activation(self.Dense(x))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_2, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1 class Dense_blockNew(nn.Module): """ This is the initial dense block as in the paper """ def __init__(self, in_channels, out_channels): super(Dense_blockNew, self).__init__() self.Dense = torch.nn.Linear(in_channels, out_channels) nn.init.xavier_uniform(self.Dense.weight.data, 1.0) self.activation = torch.nn.LeakyReLU(0.2) def forward(self, input_0): primals_1 = self.Dense.weight primals_2 = self.Dense.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
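# --- Illustrative use (added; a sketch assuming CUDA, mirroring get_inputs above) ---
# Dense_blockNew routes the Linear weight, bias, and input through call(),
# which runs an extern matmul followed by the fused bias-add/LeakyReLU kernel.
# Note the recorded class keeps the deprecated nn.init.xavier_uniform call
# exactly as exported. _demo_compiled_dense_block is a name introduced here.
import torch

def _demo_compiled_dense_block():
    block = Dense_blockNew(4, 4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = block(x)                   # mm + fused bias/LeakyReLU kernel
    assert y.shape == (4, 4, 4, 4)
    return y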
Mohanned-Elkholy/ResNet-GAN
Dense_block
false
5612
[ "MIT" ]
1
81b01294d8b5035131aee24d486e2cb879030832
https://github.com/Mohanned-Elkholy/ResNet-GAN/tree/81b01294d8b5035131aee24d486e2cb879030832
import torch
import torch.nn as nn


class Model(nn.Module):
    """ This is the initial dense block as in the paper """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.Dense = torch.nn.Linear(in_channels, out_channels)
        # nn.init.xavier_uniform is deprecated; use the in-place variant on
        # the parameter directly instead of mutating .data.
        nn.init.xavier_uniform_(self.Dense.weight, gain=1.0)
        self.activation = torch.nn.LeakyReLU(0.2)

    def forward(self, x):
        return self.activation(self.Dense(x))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
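# --- Illustrative wiring of the test helpers (added; a hypothetical harness sketch) ---
# In this entry get_init_inputs() returns the flat constructor args [4, 4], so
# a generic harness can build and run the module as below.
# _run_model_from_helpers is a name introduced here for illustration.
import torch

def _run_model_from_helpers():
    model = Model(*get_init_inputs())   # Model(4, 4)
    out = model(*get_inputs())          # LeakyReLU(0.2)(Linear(x))
    return out.shape                    # torch.Size([4, 4, 4, 4])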
MatrixTree
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/mw/cmwulpxxw2jt763yw6dihufmcwawkztohwf6my2rtf33kl2nbgds.py # Topologically Sorted Source Nodes: [eye, ne, lap, sum_1], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.sum] # Source node to ATen node mapping: # eye => eq, full_default, full_default_1, iota_1, where # lap => full_default_2, where_1 # ne => ne # sum_1 => sum_1 # Graph fragment: # %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze, %iota_1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %full_default_1), kwargs = {}) # %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%where, 0), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ne, %full_default_2, %select), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%where_1, [0]), kwargs = {}) triton_poi_fused_eye_masked_fill_ne_sum_0 = async_compile.triton('triton_poi_fused_eye_masked_fill_ne_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( 
size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eye_masked_fill_ne_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_eye_masked_fill_ne_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp7 = tl.load(in_ptr0 + (x0), xmask) tmp16 = tl.load(in_ptr0 + (4 + x0), xmask) tmp25 = tl.load(in_ptr0 + (8 + x0), xmask) tmp34 = tl.load(in_ptr0 + (12 + x0), xmask) tmp0 = tl.full([1], 0, tl.int64) tmp1 = x0 tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = tmp5 != tmp4 tmp8 = tl_math.exp(tmp7) tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = tl.where(tmp6, tmp4, tmp10) tmp12 = tl.full([1], 1, tl.int64) tmp13 = tmp12 == tmp1 tmp14 = tl.where(tmp13, tmp3, tmp4) tmp15 = tmp14 != tmp4 tmp17 = tl_math.exp(tmp16) tmp18 = tmp17 + tmp9 tmp19 = tl.where(tmp15, tmp4, tmp18) tmp20 = tmp11 + tmp19 tmp21 = tl.full([1], 2, tl.int64) tmp22 = tmp21 == tmp1 tmp23 = tl.where(tmp22, tmp3, tmp4) tmp24 = tmp23 != tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp26 + tmp9 tmp28 = tl.where(tmp24, tmp4, tmp27) tmp29 = tmp20 + tmp28 tmp30 = tl.full([1], 3, tl.int64) tmp31 = tmp30 == tmp1 tmp32 = tl.where(tmp31, tmp3, tmp4) tmp33 = tmp32 != tmp4 tmp35 = tl_math.exp(tmp34) tmp36 = tmp35 + tmp9 tmp37 = tl.where(tmp33, tmp4, tmp36) tmp38 = tmp29 + tmp37 tl.store(out_ptr0 + (x0), tmp38, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/5f/c5fvishccr56l7e7obdlxzywjkbh3davnf5cdvnuqxihtgywozfd.py # Topologically Sorted Source Nodes: [eye, ne, lap, neg, diag, lap_1, diag_1, exp_1], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.neg, aten.diag_embed, aten.add, aten.diagonal_copy, aten.exp] # Source node to ATen node mapping: # diag => eq_1, full_default_3, iota_2, where_2 # diag_1 => diagonal_copy # exp_1 => exp_1 # eye => eq, full_default, full_default_1, iota_1, where # lap => full_default_2, where_1 # lap_1 => add_1 # ne => ne # neg => neg # Graph fragment: # %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze, %iota_1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: 
torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %full_default_1), kwargs = {}) # %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%where, 0), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ne, %full_default_2, %select), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%where_1,), kwargs = {}) # %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_2, %unsqueeze_2), kwargs = {}) # %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %permute, %full_default_3), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg, %where_2), kwargs = {}) # %diagonal_copy : [num_users=1] = call_function[target=torch.ops.aten.diagonal_copy.default](args = (%select_1,), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%diagonal_copy,), kwargs = {}) # %select_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%add_1, %exp_1, 0, 0), kwargs = {}) triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_1 = async_compile.triton('triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp3 = tl.load(in_ptr0 + (5*x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (x2), xmask) tmp18 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 0, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = tl_math.exp(tmp3) tmp5 = x0 tmp6 = tmp0 == tmp5 tmp7 = 1.0 tmp8 = 0.0 tmp9 = tl.where(tmp6, tmp7, tmp8) tmp10 = tmp9 != tmp8 tmp12 = tl_math.exp(tmp11) tmp13 = 1e-05 tmp14 = tmp12 + tmp13 tmp15 = tl.where(tmp10, tmp8, tmp14) tmp16 = -tmp15 tmp17 = tmp5 == tmp0 tmp19 = tl.where(tmp17, tmp18, tmp8) tmp20 = tmp16 + tmp19 tmp21 = tl.where(tmp2, tmp4, tmp20) tl.store(out_ptr0 + (x2), tmp21, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/5k/c5kir7ouxg6vi6ka3fqtxb3atjlynhq3dcipimsepx55zvv5ls6k.py # Topologically Sorted Source Nodes: [eye_1, ne_1, lap_2, sum_2], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.sum] # Source node to ATen node mapping: # eye_1 => eq_3, full_default_7, full_default_8, iota_7, where_4 # lap_2 => full_default_9, where_5 # ne_1 => ne_1 # sum_2 => sum_2 # Graph fragment: # %iota_7 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %eq_3 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze_6, %iota_7), kwargs = {}) # %full_default_7 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %full_default_8 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_4 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_3, %full_default_7, %full_default_8), kwargs = {}) # %ne_1 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%where_4, 0), kwargs = {}) # %full_default_9 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ne_1, %full_default_9, %select_20), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%where_5, [0]), kwargs = {}) triton_poi_fused_eye_masked_fill_ne_sum_2 = async_compile.triton('triton_poi_fused_eye_masked_fill_ne_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 
'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eye_masked_fill_ne_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_eye_masked_fill_ne_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp7 = tl.load(in_ptr0 + (16 + x0), xmask) tmp16 = tl.load(in_ptr0 + (20 + x0), xmask) tmp25 = tl.load(in_ptr0 + (24 + x0), xmask) tmp34 = tl.load(in_ptr0 + (28 + x0), xmask) tmp0 = tl.full([1], 0, tl.int64) tmp1 = x0 tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = tmp5 != tmp4 tmp8 = tl_math.exp(tmp7) tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = tl.where(tmp6, tmp4, tmp10) tmp12 = tl.full([1], 1, tl.int64) tmp13 = tmp12 == tmp1 tmp14 = tl.where(tmp13, tmp3, tmp4) tmp15 = tmp14 != tmp4 tmp17 = tl_math.exp(tmp16) tmp18 = tmp17 + tmp9 tmp19 = tl.where(tmp15, tmp4, tmp18) tmp20 = tmp11 + tmp19 tmp21 = tl.full([1], 2, tl.int64) tmp22 = tmp21 == tmp1 tmp23 = tl.where(tmp22, tmp3, tmp4) tmp24 = tmp23 != tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp26 + tmp9 tmp28 = tl.where(tmp24, tmp4, tmp27) tmp29 = tmp20 + tmp28 tmp30 = tl.full([1], 3, tl.int64) tmp31 = tmp30 == tmp1 tmp32 = tl.where(tmp31, tmp3, tmp4) tmp33 = tmp32 != tmp4 tmp35 = tl_math.exp(tmp34) tmp36 = tmp35 + tmp9 tmp37 = tl.where(tmp33, tmp4, tmp36) tmp38 = tmp29 + tmp37 tl.store(out_ptr0 + (x0), tmp38, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ny/cnyxgqp3tmqmvliho4wkdr5wi6tnqd2oycsfgq3cjapc2phvva3v.py # Topologically Sorted Source Nodes: [eye_1, ne_1, lap_2, neg_1, diag_5, lap_3, diag_6, exp_5], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.neg, aten.diag_embed, aten.add, aten.diagonal_copy, aten.exp] # Source node to ATen node mapping: # diag_5 => eq_4, full_default_10, iota_8, where_6 # diag_6 => diagonal_copy_3 # exp_5 => exp_5 # eye_1 => eq_3, full_default_7, full_default_8, iota_7, where_4 # lap_2 => full_default_9, where_5 # lap_3 => add_3 # ne_1 => ne_1 # neg_1 => neg_1 # Graph fragment: # %iota_7 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %eq_3 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze_6, %iota_7), kwargs = {}) # %full_default_7 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %full_default_8 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_4 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_3, %full_default_7, %full_default_8), kwargs = {}) # %ne_1 : 
# [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%where_4, 0), kwargs = {})
#   %full_default_9 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ne_1, %full_default_9, %select_20), kwargs = {})
#   %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%where_5,), kwargs = {})
#   %iota_8 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_4 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_8, %unsqueeze_8), kwargs = {})
#   %full_default_10 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_4, %permute_5, %full_default_10), kwargs = {})
#   %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_1, %where_6), kwargs = {})
#   %diagonal_copy_3 : [num_users=1] = call_function[target=torch.ops.aten.diagonal_copy.default](args = (%select_21,), kwargs = {})
#   %exp_5 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%diagonal_copy_3,), kwargs = {})
#   %select_scatter_default_4 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%add_3, %exp_5, 0, 0), kwargs = {})
triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_3 = async_compile.triton('triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 4)
    x0 = xindex % 4
    x2 = xindex
    tmp3 = tl.load(in_ptr0 + (16 + (5*x0)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (16 + x2), xmask)
    tmp18 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = tl_math.exp(tmp3)
    tmp5 = x0
    tmp6 = tmp0 == tmp5
    tmp7 = 1.0
    tmp8 = 0.0
    tmp9 = tl.where(tmp6, tmp7, tmp8)
    tmp10 = tmp9 != tmp8
    tmp12 = tl_math.exp(tmp11)
    tmp13 = 1e-05
    tmp14 = tmp12 + tmp13
    tmp15 = tl.where(tmp10, tmp8, tmp14)
    tmp16 = -tmp15
    tmp17 = tmp5 == tmp0
    tmp19 = tl.where(tmp17, tmp18, tmp8)
    tmp20 = tmp16 + tmp19
    tmp21 = tl.where(tmp2, tmp4, tmp20)
    tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/76/c764os7jr65oallcf3eupkgmn4rajwnkb7qa2a4qrpedfegb2xji.py
# Topologically Sorted Source Nodes: [diag_4, add_2], Original ATen: [aten.diag_embed, aten.add]
# Source node to ATen node mapping:
#   add_2 => add_2
#   diag_4 => eq_2, full_default_6, iota_4, where_3
# Graph fragment:
#   %iota_4 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_2 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_4, %unsqueeze_5), kwargs = {})
#   %full_default_6 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_2, %permute_4, %full_default_6), kwargs = {})
#   %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_16, %where_3), kwargs = {})
triton_poi_fused_add_diag_embed_4 = async_compile.triton('triton_poi_fused_add_diag_embed_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_diag_embed_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = (xindex // 4)
    tmp4 = tl.load(in_ptr0 + (x2), xmask)
    tmp6 = tl.load(in_ptr1 + (5*x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (x2), xmask)
    tmp18 = tl.load(in_ptr0 + (5*x0), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp0 = tl.full([1], 0, tl.int32)
    tmp1 = tmp0 == tmp0
    tmp2 = x0
    tmp3 = tmp2 == tmp0
    tmp5 = tl_math.exp(tmp4)
    tmp7 = tmp5 * tmp6
    tmp8 = 0.0
    tmp9 = tl.where(tmp3, tmp8, tmp7)
    tmp10 = x1
    tmp11 = tmp10 == tmp0
    tmp13 = tmp5 * tmp12
    tmp14 = tl.where(tmp11, tmp8, tmp13)
    tmp15 = tmp9 - tmp14
    tmp16 = tl.where(tmp1, tmp15, tmp4)
    tmp17 = tmp2 == tmp10
    tmp19 = tl_math.exp(tmp18)
    tmp21 = tmp19 * tmp20
    tmp22 = tl.where(tmp17, tmp21, tmp8)
    tmp23 = tmp16 + tmp22
    tl.store(out_ptr0 + (x2), tmp23, xmask)
''', device_str='cuda')
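
# NOTE: This kernel family appears to assemble, per 4x4 batch slice, the
# Laplacian of a matrix-tree style computation: exp-weights with the diagonal
# masked out, a column-sum degree diagonal, and row 0 replaced by the
# exponentiated root scores. A minimal eager-mode sketch of what
# `..._ne_neg_3` materializes for one slice; the function and its names are
# illustrative, not part of the generated graph:
def _laplacian_sketch(scores):
    n = scores.size(0)
    w = torch.exp(scores) + 1e-05  # matches the exp(x) + 1e-05 in the kernel body
    w = w.masked_fill(torch.eye(n, dtype=torch.bool, device=scores.device), 0.0)
    lap = -w + torch.diag(w.sum(0))  # negated weights plus column-sum diagonal
    # row 0 = exp(diag(scores)), the functional form of the select_scatter node
    lap = torch.select_scatter(lap, torch.exp(scores.diagonal()), 0, 0)
    return lap
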

# kernel path: runs/run_shard_4/inductor_cache/ry/crysshrrigakeht5r6neboffuaxbkjaegs3cqmvdij2ytziz5vck.py
# Topologically Sorted Source Nodes: [exp_2, mul, setitem_1, exp_3, mul_1, setitem_2, sub, diag_4, add_2], Original ATen: [aten.exp, aten.mul, aten.lift_fresh, aten.fill, aten.sub, aten.diag_embed, aten.add]
# Source node to ATen node mapping:
#   add_2 => add_2
#   diag_4 => eq_2, full_default_6, iota_4, where_3
#   exp_2 => exp_2
#   exp_3 => exp_3
#   mul => mul
#   mul_1 => mul_1
#   setitem_1 => copy_1, full_default_4
#   setitem_2 => copy_2, full_default_5
#   sub => sub
# Graph fragment:
#   %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%select_5,), kwargs = {})
#   %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_2, %permute_1), kwargs = {})
#   %full_default_4 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %copy_1 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_7, %full_default_4), kwargs = {})
#   %select_scatter_default_1 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%mul, %copy_1, 1, 0), kwargs = {})
#   %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%select_6,), kwargs = {})
#   %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_3, %permute_2), kwargs = {})
#   %full_default_5 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %copy_2 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_9, %full_default_5), kwargs = {})
#   %select_scatter_default_2 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%mul_1, %copy_2, 0, 0), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_scatter_default_1, %select_scatter_default_2), kwargs = {})
#   %select_scatter_default_3 : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%arg0_1, %sub, 0, 0), kwargs = {})
#   %iota_4 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_2 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_4, %unsqueeze_5), kwargs = {})
#   %full_default_6 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_2, %permute_4, %full_default_6), kwargs = {})
#   %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_16, %where_3), kwargs = {})
#   %select_scatter_default_5 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_3, %add_2, 0, 0), kwargs = {})
triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_5 = async_compile.triton('triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = (xindex // 16)
    x3 = xindex % 16
    x0 = xindex % 4
    x1 = (xindex // 4) % 4
    x5 = xindex
    tmp3 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr1 + (x3), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr2 + (5*x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr1 + (x5), xmask)
    tmp0 = x2
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = x0
    tmp5 = tmp4 == tmp1
    tmp7 = tl_math.exp(tmp6)
    tmp9 = tmp7 * tmp8
    tmp10 = 0.0
    tmp11 = tl.where(tmp5, tmp10, tmp9)
    tmp12 = x1
    tmp13 = tmp12 == tmp1
    tmp15 = tmp7 * tmp14
    tmp16 = tl.where(tmp13, tmp10, tmp15)
    tmp17 = tmp11 - tmp16
    tmp19 = tl.where(tmp2, tmp17, tmp18)
    tmp20 = tl.where(tmp2, tmp3, tmp19)
    tl.store(out_ptr0 + (x5), tmp20, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/ds/cdsgimfrigpgjvo5w5eo5m4acelh6kweus6l5g6oa5euxfo5zryv.py
# Topologically Sorted Source Nodes: [eye_2, ne_2, lap_4, sum_3], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.sum]
# Source node to ATen node mapping:
#   eye_2 => eq_6, full_default_14, full_default_15, iota_13, where_8
#   lap_4 => full_default_16, where_9
#   ne_2 => ne_2
#   sum_3 => sum_3
# Graph fragment:
#   %iota_13 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_6 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze_12, %iota_13), kwargs = {})
#   %full_default_14 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %full_default_15 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_8 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_6, %full_default_14, %full_default_15), kwargs = {})
#   %ne_2 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%where_8, 0), kwargs = {})
#   %full_default_16 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_9 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ne_2, %full_default_16, %select_41), kwargs = {})
#   %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%where_9, [0]), kwargs = {})
triton_poi_fused_eye_masked_fill_ne_sum_6 = async_compile.triton('triton_poi_fused_eye_masked_fill_ne_sum_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eye_masked_fill_ne_sum_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eye_masked_fill_ne_sum_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp7 = tl.load(in_ptr0 + (32 + x0), xmask)
    tmp16 = tl.load(in_ptr0 + (36 + x0), xmask)
    tmp25 = tl.load(in_ptr0 + (40 + x0), xmask)
    tmp34 = tl.load(in_ptr0 + (44 + x0), xmask)
    tmp0 = tl.full([1], 0, tl.int64)
    tmp1 = x0
    tmp2 = tmp0 == tmp1
    tmp3 = 1.0
    tmp4 = 0.0
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = tmp5 != tmp4
    tmp8 = tl_math.exp(tmp7)
    tmp9 = 1e-05
    tmp10 = tmp8 + tmp9
    tmp11 = tl.where(tmp6, tmp4, tmp10)
    tmp12 = tl.full([1], 1, tl.int64)
    tmp13 = tmp12 == tmp1
    tmp14 = tl.where(tmp13, tmp3, tmp4)
    tmp15 = tmp14 != tmp4
    tmp17 = tl_math.exp(tmp16)
    tmp18 = tmp17 + tmp9
    tmp19 = tl.where(tmp15, tmp4, tmp18)
    tmp20 = tmp11 + tmp19
    tmp21 = tl.full([1], 2, tl.int64)
    tmp22 = tmp21 == tmp1
    tmp23 = tl.where(tmp22, tmp3, tmp4)
    tmp24 = tmp23 != tmp4
    tmp26 = tl_math.exp(tmp25)
    tmp27 = tmp26 + tmp9
    tmp28 = tl.where(tmp24, tmp4, tmp27)
    tmp29 = tmp20 + tmp28
    tmp30 = tl.full([1], 3, tl.int64)
    tmp31 = tmp30 == tmp1
    tmp32 = tl.where(tmp31, tmp3, tmp4)
    tmp33 = tmp32 != tmp4
    tmp35 = tl_math.exp(tmp34)
    tmp36 = tmp35 + tmp9
    tmp37 = tl.where(tmp33, tmp4, tmp36)
    tmp38 = tmp29 + tmp37
    tl.store(out_ptr0 + (x0), tmp38, xmask)
''', device_str='cuda')
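
# NOTE: `..._sum_6` above is the fully unrolled four-term column sum feeding
# the degree diagonal for batch slice 2 (base offset 32 = 2 * 16). A hedged
# eager equivalent of what buf12/buf19 hold for their slices; illustrative
# helper, not generated code:
def _masked_col_sum_sketch(scores):
    n = scores.size(0)
    w = torch.exp(scores) + 1e-05
    w = w.masked_fill(torch.eye(n, dtype=torch.bool, device=scores.device), 0.0)
    return w.sum(dim=0)  # column sums with the diagonal term zeroed
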

# kernel path: runs/run_shard_4/inductor_cache/bx/cbx52hvmunlgryyn5hbdsqyfni653yiknzgubzvil2q6yaseomvy.py
# Topologically Sorted Source Nodes: [eye_2, ne_2, lap_4, neg_2, diag_10, lap_5, diag_11, exp_9], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.neg, aten.diag_embed, aten.add, aten.diagonal_copy, aten.exp]
# Source node to ATen node mapping:
#   diag_10 => eq_7, full_default_17, iota_14, where_10
#   diag_11 => diagonal_copy_6
#   exp_9 => exp_9
#   eye_2 => eq_6, full_default_14, full_default_15, iota_13, where_8
#   lap_4 => full_default_16, where_9
#   lap_5 => add_5
#   ne_2 => ne_2
#   neg_2 => neg_2
# Graph fragment:
#   %iota_13 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_6 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze_12, %iota_13), kwargs = {})
#   %full_default_14 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %full_default_15 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_8 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_6, %full_default_14, %full_default_15), kwargs = {})
#   %ne_2 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%where_8, 0), kwargs = {})
#   %full_default_16 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_9 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ne_2, %full_default_16, %select_41), kwargs = {})
#   %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%where_9,), kwargs = {})
#   %iota_14 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_7 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_14, %unsqueeze_14), kwargs = {})
#   %full_default_17 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_10 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_7, %permute_10, %full_default_17), kwargs = {})
#   %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_2, %where_10), kwargs = {})
#   %diagonal_copy_6 : [num_users=1] = call_function[target=torch.ops.aten.diagonal_copy.default](args = (%select_42,), kwargs = {})
#   %exp_9 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%diagonal_copy_6,), kwargs = {})
#   %select_scatter_default_9 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%add_5, %exp_9, 0, 0), kwargs = {})
triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_7 = async_compile.triton('triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 4)
    x0 = xindex % 4
    x2 = xindex
    tmp3 = tl.load(in_ptr0 + (32 + (5*x0)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (32 + x2), xmask)
    tmp18 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = tl_math.exp(tmp3)
    tmp5 = x0
    tmp6 = tmp0 == tmp5
    tmp7 = 1.0
    tmp8 = 0.0
    tmp9 = tl.where(tmp6, tmp7, tmp8)
    tmp10 = tmp9 != tmp8
    tmp12 = tl_math.exp(tmp11)
    tmp13 = 1e-05
    tmp14 = tmp12 + tmp13
    tmp15 = tl.where(tmp10, tmp8, tmp14)
    tmp16 = -tmp15
    tmp17 = tmp5 == tmp0
    tmp19 = tl.where(tmp17, tmp18, tmp8)
    tmp20 = tmp16 + tmp19
    tmp21 = tl.where(tmp2, tmp4, tmp20)
    tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/mz/cmzl5zoke7j2kqgzcvjpkpdzkg2kwyb47nzibimhi2hklzb5s6ut.py
# Topologically Sorted Source Nodes: [diag_9, add_4], Original ATen: [aten.diag_embed, aten.add]
# Source node to ATen node mapping:
#   add_4 => add_4
#   diag_9 => eq_5, full_default_13, iota_10, where_7
# Graph fragment:
#   %iota_10 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_5 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_10, %unsqueeze_11), kwargs = {})
#   %full_default_13 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_7 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_5, %permute_9, %full_default_13), kwargs = {})
#   %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_37, %where_7), kwargs = {})
triton_poi_fused_add_diag_embed_8 = async_compile.triton('triton_poi_fused_add_diag_embed_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_diag_embed_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = (xindex // 4)
    tmp5 = tl.load(in_ptr0 + (16 + x2), xmask)
    tmp7 = tl.load(in_ptr1 + (5*x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr1 + (x2), xmask)
    tmp17 = tl.load(in_ptr2 + (16 + x2), xmask)
    tmp20 = tl.load(in_ptr0 + (16 + (5*x0)), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp0 = tl.full([1], 1, tl.int32)
    tmp1 = tmp0 == tmp0
    tmp2 = x0
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = tmp2 == tmp3
    tmp6 = tl_math.exp(tmp5)
    tmp8 = tmp6 * tmp7
    tmp9 = 0.0
    tmp10 = tl.where(tmp4, tmp9, tmp8)
    tmp11 = x1
    tmp12 = tmp11 == tmp3
    tmp14 = tmp6 * tmp13
    tmp15 = tl.where(tmp12, tmp9, tmp14)
    tmp16 = tmp10 - tmp15
    tmp18 = tl.where(tmp1, tmp16, tmp17)
    tmp19 = tmp2 == tmp11
    tmp21 = tl_math.exp(tmp20)
    tmp23 = tmp21 * tmp22
    tmp24 = tl.where(tmp19, tmp23, tmp9)
    tmp25 = tmp18 + tmp24
    tl.store(out_ptr0 + (x2), tmp25, xmask)
''', device_str='cuda')
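
# NOTE: `triton_poi_fused_add_diag_embed_4/_8/_12/_14` combine the inverse
# Laplacian with the exponentiated scores in the style of matrix-tree
# marginals: a term1 - term2 difference plus a diagonal of root marginals.
# A hedged eager-mode sketch that matches the indexing of the kernel above;
# illustrative names only, not the author's source:
def _marginals_sketch(scores, inv_laplacian):
    x = torch.exp(scores)
    term1 = x * inv_laplacian.diagonal().unsqueeze(0)  # column j scaled by inv_lap[j, j]
    term2 = x * inv_laplacian
    term1[:, 0] = 0.0  # the `setitem` fills in the fused graph
    term2[0] = 0.0
    out = term1 - term2
    roots = torch.exp(scores.diagonal()) * inv_laplacian[0]
    return out + torch.diag(roots)
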

# kernel path: runs/run_shard_4/inductor_cache/4m/c4mfewqtg74k4hoz5gcjnz5z3hfa6tzmoe5hz6cil2jwbmx7cosw.py
# Topologically Sorted Source Nodes: [exp_6, mul_3, setitem_6, exp_7, mul_4, setitem_7, sub_1, diag_9, add_4], Original ATen: [aten.exp, aten.mul, aten.lift_fresh, aten.fill, aten.sub, aten.diag_embed, aten.add]
# Source node to ATen node mapping:
#   add_4 => add_4
#   diag_9 => eq_5, full_default_13, iota_10, where_7
#   exp_6 => exp_6
#   exp_7 => exp_7
#   mul_3 => mul_3
#   mul_4 => mul_4
#   setitem_6 => copy_6, full_default_11
#   setitem_7 => copy_7, full_default_12
#   sub_1 => sub_1
# Graph fragment:
#   %exp_6 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%select_25,), kwargs = {})
#   %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_6, %permute_6), kwargs = {})
#   %full_default_11 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %copy_6 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_27, %full_default_11), kwargs = {})
#   %select_scatter_default_6 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%mul_3, %copy_6, 1, 0), kwargs = {})
#   %exp_7 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%select_26,), kwargs = {})
#   %mul_4 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_7, %permute_7), kwargs = {})
#   %full_default_12 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %copy_7 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_29, %full_default_12), kwargs = {})
#   %select_scatter_default_7 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%mul_4, %copy_7, 0, 0), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_scatter_default_6, %select_scatter_default_7), kwargs = {})
#   %select_scatter_default_8 : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_5, %sub_1, 0, 1), kwargs = {})
#   %iota_10 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_5 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_10, %unsqueeze_11), kwargs = {})
#   %full_default_13 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_7 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_5, %permute_9, %full_default_13), kwargs = {})
#   %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_37, %where_7), kwargs = {})
#   %select_scatter_default_10 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_8, %add_4, 0, 1), kwargs = {})
triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_9 = async_compile.triton('triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = (xindex // 16)
    x3 = xindex % 16
    x0 = xindex % 4
    x1 = (xindex // 4) % 4
    x5 = xindex
    tmp3 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (16 + x3), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + (5*x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_out_ptr0 + (x5), xmask)
    tmp0 = x2
    tmp1 = tl.full([1], 1, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = x0
    tmp5 = tl.full([1], 0, tl.int32)
    tmp6 = tmp4 == tmp5
    tmp8 = tl_math.exp(tmp7)
    tmp10 = tmp8 * tmp9
    tmp11 = 0.0
    tmp12 = tl.where(tmp6, tmp11, tmp10)
    tmp13 = x1
    tmp14 = tmp13 == tmp5
    tmp16 = tmp8 * tmp15
    tmp17 = tl.where(tmp14, tmp11, tmp16)
    tmp18 = tmp12 - tmp17
    tmp20 = tl.where(tmp2, tmp18, tmp19)
    tmp21 = tl.where(tmp2, tmp3, tmp20)
    tl.store(in_out_ptr0 + (x5), tmp21, xmask)
''', device_str='cuda')
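
# NOTE: `mutated_arg_names: ['in_out_ptr0']` marks the kernel above as
# updating the running (4, 4, 4) output buffer in place: it realizes
# `aten.select_scatter` (write one 4x4 slice, keep the rest) without a fresh
# allocation. A functional sketch of the same semantics:
def _select_scatter_sketch(out, new_slice, b):
    # equivalent to: res = out.clone(); res[b] = new_slice
    return torch.select_scatter(out, new_slice, 0, b)
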

# kernel path: runs/run_shard_4/inductor_cache/f4/cf43dymhxq3xler6ijzfxsjgm3oemfpzsjaoarspoimxqod3bkiv.py
# Topologically Sorted Source Nodes: [eye_3, ne_3, lap_6, sum_4], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.sum]
# Source node to ATen node mapping:
#   eye_3 => eq_9, full_default_21, full_default_22, iota_19, where_12
#   lap_6 => full_default_23, where_13
#   ne_3 => ne_3
#   sum_4 => sum_4
# Graph fragment:
#   %iota_19 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_9 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze_18, %iota_19), kwargs = {})
#   %full_default_21 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %full_default_22 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_12 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_9, %full_default_21, %full_default_22), kwargs = {})
#   %ne_3 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%where_12, 0), kwargs = {})
#   %full_default_23 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_13 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ne_3, %full_default_23, %select_62), kwargs = {})
#   %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%where_13, [0]), kwargs = {})
triton_poi_fused_eye_masked_fill_ne_sum_10 = async_compile.triton('triton_poi_fused_eye_masked_fill_ne_sum_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eye_masked_fill_ne_sum_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eye_masked_fill_ne_sum_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp7 = tl.load(in_ptr0 + (48 + x0), xmask)
    tmp16 = tl.load(in_ptr0 + (52 + x0), xmask)
    tmp25 = tl.load(in_ptr0 + (56 + x0), xmask)
    tmp34 = tl.load(in_ptr0 + (60 + x0), xmask)
    tmp0 = tl.full([1], 0, tl.int64)
    tmp1 = x0
    tmp2 = tmp0 == tmp1
    tmp3 = 1.0
    tmp4 = 0.0
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = tmp5 != tmp4
    tmp8 = tl_math.exp(tmp7)
    tmp9 = 1e-05
    tmp10 = tmp8 + tmp9
    tmp11 = tl.where(tmp6, tmp4, tmp10)
    tmp12 = tl.full([1], 1, tl.int64)
    tmp13 = tmp12 == tmp1
    tmp14 = tl.where(tmp13, tmp3, tmp4)
    tmp15 = tmp14 != tmp4
    tmp17 = tl_math.exp(tmp16)
    tmp18 = tmp17 + tmp9
    tmp19 = tl.where(tmp15, tmp4, tmp18)
    tmp20 = tmp11 + tmp19
    tmp21 = tl.full([1], 2, tl.int64)
    tmp22 = tmp21 == tmp1
    tmp23 = tl.where(tmp22, tmp3, tmp4)
    tmp24 = tmp23 != tmp4
    tmp26 = tl_math.exp(tmp25)
    tmp27 = tmp26 + tmp9
    tmp28 = tl.where(tmp24, tmp4, tmp27)
    tmp29 = tmp20 + tmp28
    tmp30 = tl.full([1], 3, tl.int64)
    tmp31 = tmp30 == tmp1
    tmp32 = tl.where(tmp31, tmp3, tmp4)
    tmp33 = tmp32 != tmp4
    tmp35 = tl_math.exp(tmp34)
    tmp36 = tmp35 + tmp9
    tmp37 = tl.where(tmp33, tmp4, tmp36)
    tmp38 = tmp29 + tmp37
    tl.store(out_ptr0 + (x0), tmp38, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/sg/csgmzo5h24yeta5phehu6lzwmojkxl6w5o6pxa7fu4tywve6h72o.py
# Topologically Sorted Source Nodes: [eye_3, ne_3, lap_6, neg_3, diag_15, lap_7, diag_16, exp_13], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.neg, aten.diag_embed, aten.add, aten.diagonal_copy, aten.exp]
# Source node to ATen node mapping:
#   diag_15 => eq_10, full_default_24, iota_20, where_14
#   diag_16 => diagonal_copy_9
#   exp_13 => exp_13
#   eye_3 => eq_9, full_default_21, full_default_22, iota_19, where_12
#   lap_6 => full_default_23, where_13
#   lap_7 => add_7
#   ne_3 => ne_3
#   neg_3 => neg_3
# Graph fragment:
#   %iota_19 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_9 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze_18, %iota_19), kwargs = {})
#   %full_default_21 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %full_default_22 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_12 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_9, %full_default_21, %full_default_22), kwargs = {})
#   %ne_3 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%where_12, 0), kwargs = {})
#   %full_default_23 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_13 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ne_3, %full_default_23, %select_62), kwargs = {})
#   %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%where_13,), kwargs = {})
#   %iota_20 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_10 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_20, %unsqueeze_20), kwargs = {})
#   %full_default_24 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_14 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_10, %permute_15, %full_default_24), kwargs = {})
#   %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg_3, %where_14), kwargs = {})
#   %diagonal_copy_9 : [num_users=1] = call_function[target=torch.ops.aten.diagonal_copy.default](args = (%select_63,), kwargs = {})
#   %exp_13 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%diagonal_copy_9,), kwargs = {})
#   %select_scatter_default_14 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%add_7, %exp_13, 0, 0), kwargs = {})
triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_11 = async_compile.triton('triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_11(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 4)
    x0 = xindex % 4
    x2 = xindex
    tmp3 = tl.load(in_ptr0 + (48 + (5*x0)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (48 + x2), xmask)
    tmp18 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = tl_math.exp(tmp3)
    tmp5 = x0
    tmp6 = tmp0 == tmp5
    tmp7 = 1.0
    tmp8 = 0.0
    tmp9 = tl.where(tmp6, tmp7, tmp8)
    tmp10 = tmp9 != tmp8
    tmp12 = tl_math.exp(tmp11)
    tmp13 = 1e-05
    tmp14 = tmp12 + tmp13
    tmp15 = tl.where(tmp10, tmp8, tmp14)
    tmp16 = -tmp15
    tmp17 = tmp5 == tmp0
    tmp19 = tl.where(tmp17, tmp18, tmp8)
    tmp20 = tmp16 + tmp19
    tmp21 = tl.where(tmp2, tmp4, tmp20)
    tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
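
# NOTE: Kernels `..._sum_0/_2/_6/_10` (and likewise the `_ne_neg_*` family)
# are four copies of one computation that differ only in the hard-coded batch
# offset (0, 16, 32, 48). A hypothetical consolidated Triton kernel that takes
# the offset as an argument; this is a sketch, not Inductor output:
@triton.jit
def _col_sum_batched_sketch(in_ptr0, out_ptr0, base, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    acc = tl.zeros([XBLOCK], tl.float32)
    for row in range(4):  # statically unrolled, mirroring the generated code
        v = tl.load(in_ptr0 + (base + 4 * row + x0), xmask)
        w = tl.exp(v) + 1e-05
        acc += tl.where(x0 == row, 0.0, w)  # zero the diagonal term
    tl.store(out_ptr0 + (x0), acc, xmask)
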

# kernel path: runs/run_shard_4/inductor_cache/s3/cs3gvd4vxrcwwln23ccoasr7omajc7scigkbutbxw7gq332qtsp2.py
# Topologically Sorted Source Nodes: [diag_14, add_6], Original ATen: [aten.diag_embed, aten.add]
# Source node to ATen node mapping:
#   add_6 => add_6
#   diag_14 => eq_8, full_default_20, iota_16, where_11
# Graph fragment:
#   %iota_16 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_8 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_16, %unsqueeze_17), kwargs = {})
#   %full_default_20 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_11 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_8, %permute_14, %full_default_20), kwargs = {})
#   %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_58, %where_11), kwargs = {})
triton_poi_fused_add_diag_embed_12 = async_compile.triton('triton_poi_fused_add_diag_embed_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_diag_embed_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = (xindex // 4)
    tmp5 = tl.load(in_ptr0 + (32 + x2), xmask)
    tmp7 = tl.load(in_ptr1 + (5*x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr1 + (x2), xmask)
    tmp17 = tl.load(in_ptr2 + (32 + x2), xmask)
    tmp20 = tl.load(in_ptr0 + (32 + (5*x0)), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp0 = tl.full([1], 2, tl.int32)
    tmp1 = tmp0 == tmp0
    tmp2 = x0
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = tmp2 == tmp3
    tmp6 = tl_math.exp(tmp5)
    tmp8 = tmp6 * tmp7
    tmp9 = 0.0
    tmp10 = tl.where(tmp4, tmp9, tmp8)
    tmp11 = x1
    tmp12 = tmp11 == tmp3
    tmp14 = tmp6 * tmp13
    tmp15 = tl.where(tmp12, tmp9, tmp14)
    tmp16 = tmp10 - tmp15
    tmp18 = tl.where(tmp1, tmp16, tmp17)
    tmp19 = tmp2 == tmp11
    tmp21 = tl_math.exp(tmp20)
    tmp23 = tmp21 * tmp22
    tmp24 = tl.where(tmp19, tmp23, tmp9)
    tmp25 = tmp18 + tmp24
    tl.store(out_ptr0 + (x2), tmp25, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/me/cmej2qnos2ywrczshe25nqkszxvcm53ics5lgh4v3txzow4adwk6.py
# Topologically Sorted Source Nodes: [exp_10, mul_6, setitem_11, exp_11, mul_7, setitem_12, sub_2, diag_14, add_6], Original ATen: [aten.exp, aten.mul, aten.lift_fresh, aten.fill, aten.sub, aten.diag_embed, aten.add]
# Source node to ATen node mapping:
#   add_6 => add_6
#   diag_14 => eq_8, full_default_20, iota_16, where_11
#   exp_10 => exp_10
#   exp_11 => exp_11
#   mul_6 => mul_6
#   mul_7 => mul_7
#   setitem_11 => copy_11, full_default_18
#   setitem_12 => copy_12, full_default_19
#   sub_2 => sub_2
# Graph fragment:
#   %exp_10 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%select_46,), kwargs = {})
#   %mul_6 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_10, %permute_11), kwargs = {})
#   %full_default_18 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %copy_11 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_48, %full_default_18), kwargs = {})
#   %select_scatter_default_11 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%mul_6, %copy_11, 1, 0), kwargs = {})
#   %exp_11 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%select_47,), kwargs = {})
#   %mul_7 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_11, %permute_12), kwargs = {})
#   %full_default_19 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %copy_12 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_50, %full_default_19), kwargs = {})
#   %select_scatter_default_12 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%mul_7, %copy_12, 0, 0), kwargs = {})
#   %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_scatter_default_11, %select_scatter_default_12), kwargs = {})
#   %select_scatter_default_13 : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_10, %sub_2, 0, 2), kwargs = {})
#   %iota_16 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_8 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_16, %unsqueeze_17), kwargs = {})
#   %full_default_20 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_11 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_8, %permute_14, %full_default_20), kwargs = {})
#   %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_58, %where_11), kwargs = {})
#   %select_scatter_default_15 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_13, %add_6, 0, 2), kwargs = {})
triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_13 = async_compile.triton('triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_13(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = (xindex // 16)
    x3 = xindex % 16
    x0 = xindex % 4
    x1 = (xindex // 4) % 4
    x5 = xindex
    tmp3 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (32 + x3), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + (5*x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_out_ptr0 + (x5), xmask)
    tmp0 = x2
    tmp1 = tl.full([1], 2, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = x0
    tmp5 = tl.full([1], 0, tl.int32)
    tmp6 = tmp4 == tmp5
    tmp8 = tl_math.exp(tmp7)
    tmp10 = tmp8 * tmp9
    tmp11 = 0.0
    tmp12 = tl.where(tmp6, tmp11, tmp10)
    tmp13 = x1
    tmp14 = tmp13 == tmp5
    tmp16 = tmp8 * tmp15
    tmp17 = tl.where(tmp14, tmp11, tmp16)
    tmp18 = tmp12 - tmp17
    tmp20 = tl.where(tmp2, tmp18, tmp19)
    tmp21 = tl.where(tmp2, tmp3, tmp20)
    tl.store(in_out_ptr0 + (x5), tmp21, xmask)
''', device_str='cuda')
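
# NOTE: The kernel sequence repeats verbatim for b = 0..3; Inductor appears to
# have unrolled a Python-level loop over the batch dimension in the source
# module. A sketch of that presumed structure, reusing the illustrative
# helpers defined above (not the author's actual code):
def _forward_sketch(scores):
    output = scores.clone()
    for b in range(scores.size(0)):
        lap = _laplacian_sketch(scores[b])
        inv_laplacian, _ = torch.linalg.inv_ex(lap)
        output[b] = _marginals_sketch(scores[b], inv_laplacian)
    return output
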

# kernel path: runs/run_shard_4/inductor_cache/7m/c7mcscnhipdy5wvhd42yefmq35fu4oqgmcy7p53evfyoamhb6fsp.py
# Topologically Sorted Source Nodes: [diag_19, add_8], Original ATen: [aten.diag_embed, aten.add]
# Source node to ATen node mapping:
#   add_8 => add_8
#   diag_19 => eq_11, full_default_27, iota_22, where_15
# Graph fragment:
#   %iota_22 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_11 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_22, %unsqueeze_23), kwargs = {})
#   %full_default_27 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_15 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_11, %permute_19, %full_default_27), kwargs = {})
#   %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_79, %where_15), kwargs = {})
triton_poi_fused_add_diag_embed_14 = async_compile.triton('triton_poi_fused_add_diag_embed_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_diag_embed_14(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = (xindex // 4)
    tmp5 = tl.load(in_ptr0 + (48 + x2), xmask)
    tmp7 = tl.load(in_ptr1 + (5*x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr1 + (x2), xmask)
    tmp17 = tl.load(in_ptr2 + (48 + x2), xmask)
    tmp20 = tl.load(in_ptr0 + (48 + (5*x0)), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp0 = tl.full([1], 3, tl.int32)
    tmp1 = tmp0 == tmp0
    tmp2 = x0
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = tmp2 == tmp3
    tmp6 = tl_math.exp(tmp5)
    tmp8 = tmp6 * tmp7
    tmp9 = 0.0
    tmp10 = tl.where(tmp4, tmp9, tmp8)
    tmp11 = x1
    tmp12 = tmp11 == tmp3
    tmp14 = tmp6 * tmp13
    tmp15 = tl.where(tmp12, tmp9, tmp14)
    tmp16 = tmp10 - tmp15
    tmp18 = tl.where(tmp1, tmp16, tmp17)
    tmp19 = tmp2 == tmp11
    tmp21 = tl_math.exp(tmp20)
    tmp23 = tmp21 * tmp22
    tmp24 = tl.where(tmp19, tmp23, tmp9)
    tmp25 = tmp18 + tmp24
    tl.store(out_ptr0 + (x2), tmp25, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/xd/cxdxskm7n54mdwp6h2mbtokxdjclp2po3usor7fw3tqm473pzik6.py
# Topologically Sorted Source Nodes: [exp_14, mul_9, setitem_16, exp_15, mul_10, setitem_17, sub_3, diag_19, add_8], Original ATen: [aten.exp, aten.mul, aten.lift_fresh, aten.fill, aten.sub, aten.diag_embed, aten.add]
# Source node to ATen node mapping:
#   add_8 => add_8
#   diag_19 => eq_11, full_default_27, iota_22, where_15
#   exp_14 => exp_14
#   exp_15 => exp_15
#   mul_10 => mul_10
#   mul_9 => mul_9
#   setitem_16 => copy_16, full_default_25
#   setitem_17 => copy_17, full_default_26
#   sub_3 => sub_3
# Graph fragment:
#   %exp_14 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%select_67,), kwargs = {})
#   %mul_9 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_14, %permute_16), kwargs = {})
#   %full_default_25 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %copy_16 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_69, %full_default_25), kwargs = {})
#   %select_scatter_default_16 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%mul_9, %copy_16, 1, 0), kwargs = {})
#   %exp_15 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%select_68,), kwargs = {})
#   %mul_10 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_15, %permute_17), kwargs = {})
#   %full_default_26 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %copy_17 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_71, %full_default_26), kwargs = {})
#   %select_scatter_default_17 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%mul_10, %copy_17, 0, 0), kwargs = {})
#   %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_scatter_default_16, %select_scatter_default_17), kwargs = {})
#   %select_scatter_default_18 : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_15, %sub_3, 0, 3), kwargs = {})
#   %iota_22 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %eq_11 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota_22, %unsqueeze_23), kwargs = {})
#   %full_default_27 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %where_15 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_11, %permute_19, %full_default_27), kwargs = {})
#   %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_79, %where_15), kwargs = {})
#   %select_scatter_default_19 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_18, %add_8, 0, 3), kwargs = {})
triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_15 = async_compile.triton('triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_15(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = (xindex // 16)
    x3 = xindex % 16
    x0 = xindex % 4
    x1 = (xindex // 4) % 4
    x5 = xindex
    tmp3 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (48 + x3), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + (5*x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_out_ptr0 + (x5), xmask)
    tmp0 = x2
    tmp1 = tl.full([1], 3, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp4 = x0
    tmp5 = tl.full([1], 0, tl.int32)
    tmp6 = tmp4 == tmp5
    tmp8 = tl_math.exp(tmp7)
    tmp10 = tmp8 * tmp9
    tmp11 = 0.0
    tmp12 = tl.where(tmp6, tmp11, tmp10)
    tmp13 = x1
    tmp14 = tmp13 == tmp5
    tmp16 = tmp8 * tmp15
    tmp17 = tl.where(tmp14, tmp11, tmp16)
    tmp18 = tmp12 - tmp17
    tmp20 = tl.where(tmp2, tmp18, tmp19)
    tmp21 = tl.where(tmp2, tmp3, tmp20)
    tl.store(in_out_ptr0 + (x5), tmp21, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile
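
# NOTE: `call` below drives the kernels and inverts each slice's Laplacian via
# torch.ops.aten.linalg_inv_ex.default, which returns (inverse, info) and, by
# default, does not raise on singular inputs. Public-API sketch of the same call:
def _inverse_sketch(lap):
    inv, info = torch.linalg.inv_ex(lap)
    # info == 0 signals a successful factorization; a nonzero value would mean
    # `inv` holds garbage for that matrix.
    return inv
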
ne_2, lap_4, neg_2, diag_10, lap_5, diag_11, exp_9], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.neg, aten.diag_embed, aten.add, aten.diagonal_copy, aten.exp] triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_7.run(arg0_1, buf12, buf13, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [eye_2, ne_2, lap_4, neg_2, diag_10, lap_5, diag_11, exp_9, inv_laplacian_2], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.neg, aten.diag_embed, aten.add, aten.diagonal_copy, aten.exp, aten.linalg_inv_ex] buf14 = torch.ops.aten.linalg_inv_ex.default(buf13) buf15 = buf14[0] del buf14 buf17 = buf13; del buf13 # reuse # Topologically Sorted Source Nodes: [diag_9, add_4], Original ATen: [aten.diag_embed, aten.add] triton_poi_fused_add_diag_embed_8.run(arg0_1, buf8, buf11, buf17, 16, grid=grid(16), stream=stream0) buf18 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [exp_6, mul_3, setitem_6, exp_7, mul_4, setitem_7, sub_1, diag_9, add_4], Original ATen: [aten.exp, aten.mul, aten.lift_fresh, aten.fill, aten.sub, aten.diag_embed, aten.add] triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_9.run(buf18, buf17, arg0_1, buf8, 64, grid=grid(64), stream=stream0) del buf17 buf19 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [eye_3, ne_3, lap_6, sum_4], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.sum] triton_poi_fused_eye_masked_fill_ne_sum_10.run(arg0_1, buf19, 4, grid=grid(4), stream=stream0) buf20 = reinterpret_tensor(buf8, (4, 4), (4, 1), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [eye_3, ne_3, lap_6, neg_3, diag_15, lap_7, diag_16, exp_13], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.neg, aten.diag_embed, aten.add, aten.diagonal_copy, aten.exp] triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_11.run(arg0_1, buf19, buf20, 16, grid=grid(16), stream=stream0) del buf19 # Topologically Sorted Source Nodes: [eye_3, ne_3, lap_6, neg_3, diag_15, lap_7, diag_16, exp_13, inv_laplacian_3], Original ATen: [aten.eye, aten.ne, aten.masked_fill, aten.neg, aten.diag_embed, aten.add, aten.diagonal_copy, aten.exp, aten.linalg_inv_ex] buf21 = torch.ops.aten.linalg_inv_ex.default(buf20) buf22 = buf21[0] del buf21 buf24 = buf20; del buf20 # reuse # Topologically Sorted Source Nodes: [diag_14, add_6], Original ATen: [aten.diag_embed, aten.add] triton_poi_fused_add_diag_embed_12.run(arg0_1, buf15, buf18, buf24, 16, grid=grid(16), stream=stream0) buf25 = buf18; del buf18 # reuse # Topologically Sorted Source Nodes: [exp_10, mul_6, setitem_11, exp_11, mul_7, setitem_12, sub_2, diag_14, add_6], Original ATen: [aten.exp, aten.mul, aten.lift_fresh, aten.fill, aten.sub, aten.diag_embed, aten.add] triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_13.run(buf25, buf24, arg0_1, buf15, 64, grid=grid(64), stream=stream0) del buf15 buf26 = buf24; del buf24 # reuse # Topologically Sorted Source Nodes: [diag_19, add_8], Original ATen: [aten.diag_embed, aten.add] triton_poi_fused_add_diag_embed_14.run(arg0_1, buf22, buf25, buf26, 16, grid=grid(16), stream=stream0) buf27 = buf25; del buf25 # reuse # Topologically Sorted Source Nodes: [exp_14, mul_9, setitem_16, exp_15, mul_10, setitem_17, sub_3, diag_19, add_8], Original ATen: [aten.exp, aten.mul, aten.lift_fresh, aten.fill, aten.sub, aten.diag_embed, aten.add] triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_15.run(buf27, buf26, arg0_1, buf22, 64, grid=grid(64), stream=stream0) del arg0_1 del buf22 del 
buf26 return (buf27, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
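For reference, below is a minimal eager sketch of what each unrolled group of kernels in the call() above computes for a single batch element; the compiled graph repeats this four times (b = 0..3) and leaves the matrix inversion to the torch.ops.aten.linalg_inv_ex fallback. The function name and the eps default are illustrative, taken from the module source that follows rather than from the generated code.

import torch

def one_iteration(scores_b, eps=1e-05):
    # scores_b: one (n, n) slice input[b]; mirrors a single unrolled pass.
    n = scores_b.size(0)
    lap = (scores_b.exp() + eps).masked_fill(
        torch.eye(n, device=scores_b.device).ne(0), 0)
    lap = -lap + torch.diag(lap.sum(0))   # the masked_fill/sum kernels
    lap[0] = scores_b.diag().exp()        # the diagonal_copy/exp kernel
    inv = torch.linalg.inv(lap)           # the linalg_inv_ex fallback
    factor = inv.diag().unsqueeze(1).expand_as(scores_b).transpose(0, 1)
    term1 = scores_b.exp() * factor       # fused exp/mul/fill kernels
    term2 = scores_b.exp() * inv.transpose(0, 1)
    term1[:, 0] = 0
    term2[0] = 0
    return term1 - term2 + torch.diag(
        scores_b.diag().exp() * inv.transpose(0, 1)[0])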
import torch
import torch.nn as nn
import torch.cuda
import torch.distributed


class MatrixTree(nn.Module):
    """Implementation of the matrix-tree theorem for computing marginals
    of non-projective dependency parsing. This attention layer is used
    in the paper "Learning Structured Text Representations"
    :cite:`DBLP:journals/corr/LiuL17d`.
    """

    def __init__(self, eps=1e-05):
        self.eps = eps
        super(MatrixTree, self).__init__()

    def forward(self, input):
        laplacian = input.exp() + self.eps
        output = input.clone()
        for b in range(input.size(0)):
            lap = laplacian[b].masked_fill(torch.eye(input.size(1),
                device=input.device).ne(0), 0)
            lap = -lap + torch.diag(lap.sum(0))
            lap[0] = input[b].diag().exp()
            inv_laplacian = lap.inverse()
            factor = inv_laplacian.diag().unsqueeze(1).expand_as(input[b]
                ).transpose(0, 1)
            term1 = input[b].exp().mul(factor).clone()
            term2 = input[b].exp().mul(inv_laplacian.transpose(0, 1)).clone()
            term1[:, 0] = 0
            term2[0] = 0
            output[b] = term1 - term2
            roots_output = input[b].diag().exp().mul(inv_laplacian.
                transpose(0, 1)[0])
            output[b] = output[b] + torch.diag(roots_output)
        return output


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {}]
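A hypothetical usage sketch of the module above: scores for a batch of 4 length-4 sentences go in, and a same-shaped tensor of marginals comes out, with the root-attachment marginals on the diagonal (see the roots_output line in forward()).

import torch

mt = MatrixTree()
scores = torch.randn(4, 4, 4)
marginals = mt(scores)
print(marginals.shape)      # torch.Size([4, 4, 4])
print(marginals[0].diag())  # root marginals for the first sentence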
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.cuda import torch.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_eye_masked_fill_ne_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp7 = tl.load(in_ptr0 + x0, xmask) tmp16 = tl.load(in_ptr0 + (4 + x0), xmask) tmp25 = tl.load(in_ptr0 + (8 + x0), xmask) tmp34 = tl.load(in_ptr0 + (12 + x0), xmask) tmp0 = tl.full([1], 0, tl.int64) tmp1 = x0 tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = tmp5 != tmp4 tmp8 = tl_math.exp(tmp7) tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = tl.where(tmp6, tmp4, tmp10) tmp12 = tl.full([1], 1, tl.int64) tmp13 = tmp12 == tmp1 tmp14 = tl.where(tmp13, tmp3, tmp4) tmp15 = tmp14 != tmp4 tmp17 = tl_math.exp(tmp16) tmp18 = tmp17 + tmp9 tmp19 = tl.where(tmp15, tmp4, tmp18) tmp20 = tmp11 + tmp19 tmp21 = tl.full([1], 2, tl.int64) tmp22 = tmp21 == tmp1 tmp23 = tl.where(tmp22, tmp3, tmp4) tmp24 = tmp23 != tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp26 + tmp9 tmp28 = tl.where(tmp24, tmp4, tmp27) tmp29 = tmp20 + tmp28 tmp30 = tl.full([1], 3, tl.int64) tmp31 = tmp30 == tmp1 tmp32 = tl.where(tmp31, tmp3, tmp4) tmp33 = tmp32 != tmp4 tmp35 = tl_math.exp(tmp34) tmp36 = tmp35 + tmp9 tmp37 = tl.where(tmp33, tmp4, tmp36) tmp38 = tmp29 + tmp37 tl.store(out_ptr0 + x0, tmp38, xmask) @triton.jit def triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_1( in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp3 = tl.load(in_ptr0 + 5 * x0, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + x2, xmask) tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 0, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = tl_math.exp(tmp3) tmp5 = x0 tmp6 = tmp0 == tmp5 tmp7 = 1.0 tmp8 = 0.0 tmp9 = tl.where(tmp6, tmp7, tmp8) tmp10 = tmp9 != tmp8 tmp12 = tl_math.exp(tmp11) tmp13 = 1e-05 tmp14 = tmp12 + tmp13 tmp15 = tl.where(tmp10, tmp8, tmp14) tmp16 = -tmp15 tmp17 = tmp5 == tmp0 tmp19 = tl.where(tmp17, tmp18, tmp8) tmp20 = tmp16 + tmp19 tmp21 = tl.where(tmp2, tmp4, tmp20) tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_poi_fused_eye_masked_fill_ne_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp7 = tl.load(in_ptr0 + (16 + x0), xmask) tmp16 = tl.load(in_ptr0 + (20 + x0), xmask) tmp25 = tl.load(in_ptr0 + (24 + x0), xmask) tmp34 = tl.load(in_ptr0 + (28 + x0), xmask) tmp0 = tl.full([1], 0, tl.int64) tmp1 = x0 tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = tmp5 != tmp4 tmp8 = tl_math.exp(tmp7) tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = tl.where(tmp6, tmp4, tmp10) tmp12 = tl.full([1], 1, tl.int64) tmp13 = tmp12 == tmp1 tmp14 = tl.where(tmp13, tmp3, tmp4) tmp15 = tmp14 != tmp4 tmp17 = 
tl_math.exp(tmp16) tmp18 = tmp17 + tmp9 tmp19 = tl.where(tmp15, tmp4, tmp18) tmp20 = tmp11 + tmp19 tmp21 = tl.full([1], 2, tl.int64) tmp22 = tmp21 == tmp1 tmp23 = tl.where(tmp22, tmp3, tmp4) tmp24 = tmp23 != tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp26 + tmp9 tmp28 = tl.where(tmp24, tmp4, tmp27) tmp29 = tmp20 + tmp28 tmp30 = tl.full([1], 3, tl.int64) tmp31 = tmp30 == tmp1 tmp32 = tl.where(tmp31, tmp3, tmp4) tmp33 = tmp32 != tmp4 tmp35 = tl_math.exp(tmp34) tmp36 = tmp35 + tmp9 tmp37 = tl.where(tmp33, tmp4, tmp36) tmp38 = tmp29 + tmp37 tl.store(out_ptr0 + x0, tmp38, xmask) @triton.jit def triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_3( in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp3 = tl.load(in_ptr0 + (16 + 5 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (16 + x2), xmask) tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 0, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = tl_math.exp(tmp3) tmp5 = x0 tmp6 = tmp0 == tmp5 tmp7 = 1.0 tmp8 = 0.0 tmp9 = tl.where(tmp6, tmp7, tmp8) tmp10 = tmp9 != tmp8 tmp12 = tl_math.exp(tmp11) tmp13 = 1e-05 tmp14 = tmp12 + tmp13 tmp15 = tl.where(tmp10, tmp8, tmp14) tmp16 = -tmp15 tmp17 = tmp5 == tmp0 tmp19 = tl.where(tmp17, tmp18, tmp8) tmp20 = tmp16 + tmp19 tmp21 = tl.where(tmp2, tmp4, tmp20) tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_poi_fused_add_diag_embed_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp4 = tl.load(in_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr1 + 5 * x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + x2, xmask) tmp18 = tl.load(in_ptr0 + 5 * x0, xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp0 = tl.full([1], 0, tl.int32) tmp1 = tmp0 == tmp0 tmp2 = x0 tmp3 = tmp2 == tmp0 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 * tmp6 tmp8 = 0.0 tmp9 = tl.where(tmp3, tmp8, tmp7) tmp10 = x1 tmp11 = tmp10 == tmp0 tmp13 = tmp5 * tmp12 tmp14 = tl.where(tmp11, tmp8, tmp13) tmp15 = tmp9 - tmp14 tmp16 = tl.where(tmp1, tmp15, tmp4) tmp17 = tmp2 == tmp10 tmp19 = tl_math.exp(tmp18) tmp21 = tmp19 * tmp20 tmp22 = tl.where(tmp17, tmp21, tmp8) tmp23 = tmp16 + tmp22 tl.store(out_ptr0 + x2, tmp23, xmask) @triton.jit def triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x3 = xindex % 16 x0 = xindex % 4 x1 = xindex // 4 % 4 x5 = xindex tmp3 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + 5 * x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + x5, xmask) tmp0 = x2 tmp1 = tl.full([1], 0, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = x0 tmp5 = tmp4 == tmp1 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 * tmp8 tmp10 = 0.0 tmp11 = tl.where(tmp5, tmp10, tmp9) tmp12 = x1 tmp13 = tmp12 == tmp1 tmp15 = tmp7 * tmp14 tmp16 = tl.where(tmp13, tmp10, tmp15) tmp17 = tmp11 - tmp16 tmp19 = tl.where(tmp2, 
tmp17, tmp18) tmp20 = tl.where(tmp2, tmp3, tmp19) tl.store(out_ptr0 + x5, tmp20, xmask) @triton.jit def triton_poi_fused_eye_masked_fill_ne_sum_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp7 = tl.load(in_ptr0 + (32 + x0), xmask) tmp16 = tl.load(in_ptr0 + (36 + x0), xmask) tmp25 = tl.load(in_ptr0 + (40 + x0), xmask) tmp34 = tl.load(in_ptr0 + (44 + x0), xmask) tmp0 = tl.full([1], 0, tl.int64) tmp1 = x0 tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = tmp5 != tmp4 tmp8 = tl_math.exp(tmp7) tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = tl.where(tmp6, tmp4, tmp10) tmp12 = tl.full([1], 1, tl.int64) tmp13 = tmp12 == tmp1 tmp14 = tl.where(tmp13, tmp3, tmp4) tmp15 = tmp14 != tmp4 tmp17 = tl_math.exp(tmp16) tmp18 = tmp17 + tmp9 tmp19 = tl.where(tmp15, tmp4, tmp18) tmp20 = tmp11 + tmp19 tmp21 = tl.full([1], 2, tl.int64) tmp22 = tmp21 == tmp1 tmp23 = tl.where(tmp22, tmp3, tmp4) tmp24 = tmp23 != tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp26 + tmp9 tmp28 = tl.where(tmp24, tmp4, tmp27) tmp29 = tmp20 + tmp28 tmp30 = tl.full([1], 3, tl.int64) tmp31 = tmp30 == tmp1 tmp32 = tl.where(tmp31, tmp3, tmp4) tmp33 = tmp32 != tmp4 tmp35 = tl_math.exp(tmp34) tmp36 = tmp35 + tmp9 tmp37 = tl.where(tmp33, tmp4, tmp36) tmp38 = tmp29 + tmp37 tl.store(out_ptr0 + x0, tmp38, xmask) @triton.jit def triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_7( in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp3 = tl.load(in_ptr0 + (32 + 5 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (32 + x2), xmask) tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 0, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = tl_math.exp(tmp3) tmp5 = x0 tmp6 = tmp0 == tmp5 tmp7 = 1.0 tmp8 = 0.0 tmp9 = tl.where(tmp6, tmp7, tmp8) tmp10 = tmp9 != tmp8 tmp12 = tl_math.exp(tmp11) tmp13 = 1e-05 tmp14 = tmp12 + tmp13 tmp15 = tl.where(tmp10, tmp8, tmp14) tmp16 = -tmp15 tmp17 = tmp5 == tmp0 tmp19 = tl.where(tmp17, tmp18, tmp8) tmp20 = tmp16 + tmp19 tmp21 = tl.where(tmp2, tmp4, tmp20) tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_poi_fused_add_diag_embed_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp5 = tl.load(in_ptr0 + (16 + x2), xmask) tmp7 = tl.load(in_ptr1 + 5 * x0, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + x2, xmask) tmp17 = tl.load(in_ptr2 + (16 + x2), xmask) tmp20 = tl.load(in_ptr0 + (16 + 5 * x0), xmask, eviction_policy= 'evict_last') tmp22 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp0 = tl.full([1], 1, tl.int32) tmp1 = tmp0 == tmp0 tmp2 = x0 tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp2 == tmp3 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 * tmp7 tmp9 = 0.0 tmp10 = tl.where(tmp4, tmp9, tmp8) tmp11 = x1 tmp12 = tmp11 == tmp3 tmp14 = tmp6 * tmp13 tmp15 = tl.where(tmp12, tmp9, tmp14) tmp16 = tmp10 - tmp15 tmp18 = tl.where(tmp1, tmp16, tmp17) tmp19 = tmp2 == tmp11 tmp21 = tl_math.exp(tmp20) tmp23 = tmp21 * tmp22 tmp24 = tl.where(tmp19, tmp23, tmp9) tmp25 = tmp18 + tmp24 tl.store(out_ptr0 + x2, tmp25, xmask) @triton.jit def 
triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x3 = xindex % 16 x0 = xindex % 4 x1 = xindex // 4 % 4 x5 = xindex tmp3 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (16 + x3), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + 5 * x0, xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp19 = tl.load(in_out_ptr0 + x5, xmask) tmp0 = x2 tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = x0 tmp5 = tl.full([1], 0, tl.int32) tmp6 = tmp4 == tmp5 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 * tmp9 tmp11 = 0.0 tmp12 = tl.where(tmp6, tmp11, tmp10) tmp13 = x1 tmp14 = tmp13 == tmp5 tmp16 = tmp8 * tmp15 tmp17 = tl.where(tmp14, tmp11, tmp16) tmp18 = tmp12 - tmp17 tmp20 = tl.where(tmp2, tmp18, tmp19) tmp21 = tl.where(tmp2, tmp3, tmp20) tl.store(in_out_ptr0 + x5, tmp21, xmask) @triton.jit def triton_poi_fused_eye_masked_fill_ne_sum_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp7 = tl.load(in_ptr0 + (48 + x0), xmask) tmp16 = tl.load(in_ptr0 + (52 + x0), xmask) tmp25 = tl.load(in_ptr0 + (56 + x0), xmask) tmp34 = tl.load(in_ptr0 + (60 + x0), xmask) tmp0 = tl.full([1], 0, tl.int64) tmp1 = x0 tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = tmp5 != tmp4 tmp8 = tl_math.exp(tmp7) tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = tl.where(tmp6, tmp4, tmp10) tmp12 = tl.full([1], 1, tl.int64) tmp13 = tmp12 == tmp1 tmp14 = tl.where(tmp13, tmp3, tmp4) tmp15 = tmp14 != tmp4 tmp17 = tl_math.exp(tmp16) tmp18 = tmp17 + tmp9 tmp19 = tl.where(tmp15, tmp4, tmp18) tmp20 = tmp11 + tmp19 tmp21 = tl.full([1], 2, tl.int64) tmp22 = tmp21 == tmp1 tmp23 = tl.where(tmp22, tmp3, tmp4) tmp24 = tmp23 != tmp4 tmp26 = tl_math.exp(tmp25) tmp27 = tmp26 + tmp9 tmp28 = tl.where(tmp24, tmp4, tmp27) tmp29 = tmp20 + tmp28 tmp30 = tl.full([1], 3, tl.int64) tmp31 = tmp30 == tmp1 tmp32 = tl.where(tmp31, tmp3, tmp4) tmp33 = tmp32 != tmp4 tmp35 = tl_math.exp(tmp34) tmp36 = tmp35 + tmp9 tmp37 = tl.where(tmp33, tmp4, tmp36) tmp38 = tmp29 + tmp37 tl.store(out_ptr0 + x0, tmp38, xmask) @triton.jit def triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_11( in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp3 = tl.load(in_ptr0 + (48 + 5 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (48 + x2), xmask) tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 0, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = tl_math.exp(tmp3) tmp5 = x0 tmp6 = tmp0 == tmp5 tmp7 = 1.0 tmp8 = 0.0 tmp9 = tl.where(tmp6, tmp7, tmp8) tmp10 = tmp9 != tmp8 tmp12 = tl_math.exp(tmp11) tmp13 = 1e-05 tmp14 = tmp12 + tmp13 tmp15 = tl.where(tmp10, tmp8, tmp14) tmp16 = -tmp15 tmp17 = tmp5 == tmp0 tmp19 = tl.where(tmp17, tmp18, tmp8) tmp20 = tmp16 + tmp19 tmp21 = tl.where(tmp2, tmp4, tmp20) tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_poi_fused_add_diag_embed_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp5 = tl.load(in_ptr0 + (32 + x2), xmask) tmp7 = tl.load(in_ptr1 + 5 * x0, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + x2, xmask) tmp17 = tl.load(in_ptr2 + (32 + x2), xmask) tmp20 = tl.load(in_ptr0 + (32 + 5 * x0), xmask, eviction_policy= 'evict_last') tmp22 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp0 = tl.full([1], 2, tl.int32) tmp1 = tmp0 == tmp0 tmp2 = x0 tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp2 == tmp3 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 * tmp7 tmp9 = 0.0 tmp10 = tl.where(tmp4, tmp9, tmp8) tmp11 = x1 tmp12 = tmp11 == tmp3 tmp14 = tmp6 * tmp13 tmp15 = tl.where(tmp12, tmp9, tmp14) tmp16 = tmp10 - tmp15 tmp18 = tl.where(tmp1, tmp16, tmp17) tmp19 = tmp2 == tmp11 tmp21 = tl_math.exp(tmp20) tmp23 = tmp21 * tmp22 tmp24 = tl.where(tmp19, tmp23, tmp9) tmp25 = tmp18 + tmp24 tl.store(out_ptr0 + x2, tmp25, xmask) @triton.jit def triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_13(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x3 = xindex % 16 x0 = xindex % 4 x1 = xindex // 4 % 4 x5 = xindex tmp3 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (32 + x3), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + 5 * x0, xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp19 = tl.load(in_out_ptr0 + x5, xmask) tmp0 = x2 tmp1 = tl.full([1], 2, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = x0 tmp5 = tl.full([1], 0, tl.int32) tmp6 = tmp4 == tmp5 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 * tmp9 tmp11 = 0.0 tmp12 = tl.where(tmp6, tmp11, tmp10) tmp13 = x1 tmp14 = tmp13 == tmp5 tmp16 = tmp8 * tmp15 tmp17 = tl.where(tmp14, tmp11, tmp16) tmp18 = tmp12 - tmp17 tmp20 = tl.where(tmp2, tmp18, tmp19) tmp21 = tl.where(tmp2, tmp3, tmp20) tl.store(in_out_ptr0 + x5, tmp21, xmask) @triton.jit def triton_poi_fused_add_diag_embed_14(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp5 = tl.load(in_ptr0 + (48 + x2), xmask) tmp7 = tl.load(in_ptr1 + 5 * x0, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + x2, xmask) tmp17 = tl.load(in_ptr2 + (48 + x2), xmask) tmp20 = tl.load(in_ptr0 + (48 + 5 * x0), xmask, eviction_policy= 'evict_last') tmp22 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp0 = tl.full([1], 3, tl.int32) tmp1 = tmp0 == tmp0 tmp2 = x0 tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp2 == tmp3 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 * tmp7 tmp9 = 0.0 tmp10 = tl.where(tmp4, tmp9, tmp8) tmp11 = x1 tmp12 = tmp11 == tmp3 tmp14 = tmp6 * tmp13 tmp15 = tl.where(tmp12, tmp9, tmp14) tmp16 = tmp10 - tmp15 tmp18 = tl.where(tmp1, tmp16, tmp17) tmp19 = tmp2 == tmp11 tmp21 = tl_math.exp(tmp20) tmp23 = tmp21 * tmp22 tmp24 = tl.where(tmp19, tmp23, tmp9) tmp25 = tmp18 + tmp24 tl.store(out_ptr0 + x2, tmp25, xmask) @triton.jit def triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_15(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x3 = xindex 
% 16 x0 = xindex % 4 x1 = xindex // 4 % 4 x5 = xindex tmp3 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (48 + x3), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + 5 * x0, xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp19 = tl.load(in_out_ptr0 + x5, xmask) tmp0 = x2 tmp1 = tl.full([1], 3, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = x0 tmp5 = tl.full([1], 0, tl.int32) tmp6 = tmp4 == tmp5 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 * tmp9 tmp11 = 0.0 tmp12 = tl.where(tmp6, tmp11, tmp10) tmp13 = x1 tmp14 = tmp13 == tmp5 tmp16 = tmp8 * tmp15 tmp17 = tl.where(tmp14, tmp11, tmp16) tmp18 = tmp12 - tmp17 tmp20 = tl.where(tmp2, tmp18, tmp19) tmp21 = tl.where(tmp2, tmp3, tmp20) tl.store(in_out_ptr0 + x5, tmp21, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_poi_fused_eye_masked_fill_ne_sum_0[grid(4)](arg0_1, buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_1[ grid(16)](arg0_1, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = torch.ops.aten.linalg_inv_ex.default(buf1) buf3 = buf2[0] del buf2 buf5 = buf0 del buf0 triton_poi_fused_eye_masked_fill_ne_sum_2[grid(4)](arg0_1, buf5, 4, XBLOCK=4, num_warps=1, num_stages=1) buf6 = buf1 del buf1 triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_3[ grid(16)](arg0_1, buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = torch.ops.aten.linalg_inv_ex.default(buf6) buf8 = buf7[0] del buf7 buf10 = buf6 del buf6 triton_poi_fused_add_diag_embed_4[grid(16)](arg0_1, buf3, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_5[grid(64) ](buf10, arg0_1, buf3, buf11, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf10 buf12 = buf5 del buf5 triton_poi_fused_eye_masked_fill_ne_sum_6[grid(4)](arg0_1, buf12, 4, XBLOCK=4, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0) del buf3 triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_7[ grid(16)](arg0_1, buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1) buf14 = torch.ops.aten.linalg_inv_ex.default(buf13) buf15 = buf14[0] del buf14 buf17 = buf13 del buf13 triton_poi_fused_add_diag_embed_8[grid(16)](arg0_1, buf8, buf11, buf17, 16, XBLOCK=16, num_warps=1, num_stages=1) buf18 = buf11 del buf11 triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_9[grid(64) ](buf18, buf17, arg0_1, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf17 buf19 = buf12 del buf12 triton_poi_fused_eye_masked_fill_ne_sum_10[grid(4)](arg0_1, buf19, 4, XBLOCK=4, num_warps=1, num_stages=1) buf20 = reinterpret_tensor(buf8, (4, 4), (4, 1), 0) del buf8 triton_poi_fused_add_diag_embed_diagonal_copy_exp_eye_masked_fill_ne_neg_11[ grid(16)](arg0_1, buf19, buf20, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf19 buf21 = torch.ops.aten.linalg_inv_ex.default(buf20) buf22 = buf21[0] del buf21 buf24 = buf20 del buf20 triton_poi_fused_add_diag_embed_12[grid(16)](arg0_1, buf15, buf18, buf24, 16, XBLOCK=16, num_warps=1, num_stages=1) buf25 = buf18 del buf18 triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_13[grid(64) ](buf25, 
buf24, arg0_1, buf15, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf15 buf26 = buf24 del buf24 triton_poi_fused_add_diag_embed_14[grid(16)](arg0_1, buf22, buf25, buf26, 16, XBLOCK=16, num_warps=1, num_stages=1) buf27 = buf25 del buf25 triton_poi_fused_add_diag_embed_exp_fill_lift_fresh_mul_sub_15[grid(64) ](buf27, buf26, arg0_1, buf22, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del buf22 del buf26 return buf27, class MatrixTreeNew(nn.Module): """Implementation of the matrix-tree theorem for computing marginals of non-projective dependency parsing. This attention layer is used in the paper "Learning Structured Text Representations" :cite:`DBLP:journals/corr/LiuL17d`. """ def __init__(self, eps=1e-05): self.eps = eps super(MatrixTreeNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
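A hedged parity check between the eager module and the compiled one above, assuming both classes from this entry are in scope and a CUDA device is available; note the generated kernels hard-code the (4, 4, 4) input shape, so other sizes will not work.

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, device='cuda')
    print(torch.allclose(MatrixTree()(x), MatrixTreeNew()(x), atol=1e-05))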
MaxatTezekbayev/OpenNMT-py-lexical
MatrixTree
false
5613
[ "MIT" ]
1
44182999b863fc4074d67e0281c5bdab19abddfe
https://github.com/MaxatTezekbayev/OpenNMT-py-lexical/tree/44182999b863fc4074d67e0281c5bdab19abddfe
import torch
import torch.nn as nn
import torch.cuda
import torch.distributed


class Model(nn.Module):
    """Implementation of the matrix-tree theorem for computing marginals
    of non-projective dependency parsing. This attention layer is used
    in the paper "Learning Structured Text Representations"
    :cite:`DBLP:journals/corr/LiuL17d`.
    """

    def __init__(self, eps=1e-05):
        self.eps = eps
        super().__init__()

    def forward(self, input):
        laplacian = input.exp() + self.eps
        output = input.clone()
        for b in range(input.size(0)):
            lap = laplacian[b].masked_fill(torch.eye(input.size(1),
                device=input.device).ne(0), 0)
            lap = -lap + torch.diag(lap.sum(0))
            lap[0] = input[b].diag().exp()
            inv_laplacian = lap.inverse()
            factor = inv_laplacian.diag().unsqueeze(1).expand_as(input[b]
                ).transpose(0, 1)
            term1 = input[b].exp().mul(factor).clone()
            term2 = input[b].exp().mul(inv_laplacian.transpose(0, 1)).clone()
            term1[:, 0] = 0
            term2[0] = 0
            output[b] = term1 - term2
            roots_output = input[b].diag().exp().mul(inv_laplacian.
                transpose(0, 1)[0])
            output[b] = output[b] + torch.diag(roots_output)
        return output


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return []
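One property of the Laplacian built inside forward() is easy to verify in isolation: before row 0 is overwritten with the root scores, every column sums to zero. A small illustrative sketch, assuming sentence length 3:

import torch

scores = torch.randn(3, 3)
eps = 1e-05
lap = (scores.exp() + eps).masked_fill(torch.eye(3).ne(0), 0)
lap = -lap + torch.diag(lap.sum(0))
print(lap.sum(0))              # ~zeros: a proper graph Laplacian
lap[0] = scores.diag().exp()   # row 0 now carries the root scores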
RandomShiftsAug
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/sq/csqzbjgigod7iwdpblwlrtqegrwxplnexkan6oxv2b3voplnbg6n.py # Topologically Sorted Source Nodes: [base_grid], Original ATen: [aten.cat] # Source node to ATen node mapping: # base_grid => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze_1, %permute], 2), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) % 4 x2 = (xindex // 8) x4 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = x1 tmp6 = tmp5.to(tl.float32) tmp7 = 6.0 tmp8 = tmp6 < tmp7 tmp9 = 
0.16666666666666666 tmp10 = tmp6 * tmp9 tmp11 = -0.9166666666666666 tmp12 = tmp10 + tmp11 tmp13 = 11 + ((-1)*x1) tmp14 = tmp13.to(tl.float32) tmp15 = tmp14 * tmp9 tmp16 = 0.9166666666666666 tmp17 = tmp16 - tmp15 tmp18 = tl.where(tmp8, tmp12, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp4, tmp18, tmp19) tmp21 = tmp0 >= tmp3 tmp22 = tl.full([1], 2, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = x2 tmp25 = tmp24.to(tl.float32) tmp26 = tmp25 < tmp7 tmp27 = tmp25 * tmp9 tmp28 = tmp27 + tmp11 tmp29 = 11 + ((-1)*x2) tmp30 = tmp29.to(tl.float32) tmp31 = tmp30 * tmp9 tmp32 = tmp16 - tmp31 tmp33 = tl.where(tmp26, tmp28, tmp32) tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp21, tmp33, tmp34) tmp36 = tl.where(tmp4, tmp20, tmp35) tl.store(out_ptr0 + (x4), tmp36, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ec/cec7fujvvbe4hi4zs7gswfcyerr6hgw7amw2cirx7s2wg224nm7z.py # Topologically Sorted Source Nodes: [x_1, grid_sample], Original ATen: [aten.replication_pad2d, aten.grid_sampler_2d] # Source node to ATen node mapping: # grid_sample => add_2, add_3, add_4, add_5, add_6, add_7, add_8, convert_element_type_4, convert_element_type_5, convert_element_type_6, convert_element_type_7, convert_element_type_8, convert_element_type_9, floor, floor_1, full_default_10, full_default_11, full_default_2, full_default_3, full_default_4, full_default_5, full_default_6, full_default_7, full_default_8, full_default_9, ge, ge_1, ge_2, ge_3, ge_4, ge_5, ge_6, ge_7, index, index_1, index_2, index_3, logical_and, logical_and_1, logical_and_10, logical_and_11, logical_and_2, logical_and_3, logical_and_4, logical_and_5, logical_and_6, logical_and_7, logical_and_8, logical_and_9, lt_1, lt_2, lt_3, lt_4, lt_5, lt_6, lt_7, lt_8, mul_10, mul_11, mul_12, mul_3, mul_4, mul_5, mul_6, mul_7, mul_8, mul_9, sub_2, sub_3, sub_4, sub_5, sub_6, sub_7, sub_8, sub_9, where_10, where_11, where_12, where_3, where_4, where_5, where_6, where_7, where_8, where_9 # x_1 => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=4] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %clamp_max_1]), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, 6.0), kwargs = {}) # %add_2 : [num_users=5] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, 5.5), kwargs = {}) # %floor : [num_users=9] = call_function[target=torch.ops.aten.floor.default](args = (%add_2,), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%floor, 0), kwargs = {}) # %lt_1 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%floor, 12), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_1, 6.0), kwargs = {}) # %add_3 : [num_users=5] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 5.5), kwargs = {}) # %floor_1 : [num_users=9] = call_function[target=torch.ops.aten.floor.default](args = (%add_3,), kwargs = {}) # %ge_1 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%floor_1, 0), kwargs = {}) # %lt_2 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%floor_1, 12), kwargs = {}) # %logical_and : [num_users=1] = 
call_function[target=torch.ops.aten.logical_and.default](args = (%ge_1, %lt_2), kwargs = {}) # %logical_and_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%lt_1, %logical_and), kwargs = {}) # %logical_and_2 : [num_users=3] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge, %logical_and_1), kwargs = {}) # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%_unsafe_index_1, [%view_1, %view_2, %where_2, %where_1]), kwargs = {}) # %add_4 : [num_users=8] = call_function[target=torch.ops.aten.add.Tensor](args = (%floor, 1), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %add_2), kwargs = {}) # %add_5 : [num_users=8] = call_function[target=torch.ops.aten.add.Tensor](args = (%floor_1, 1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_3), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %sub_3), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_2, %mul_5, %full_default_2), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, %where_3), kwargs = {}) # %ge_2 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add_4, 0), kwargs = {}) # %lt_3 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%add_4, 12), kwargs = {}) # %ge_3 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%floor_1, 0), kwargs = {}) # %lt_4 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%floor_1, 12), kwargs = {}) # %logical_and_3 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_3, %lt_4), kwargs = {}) # %logical_and_4 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%lt_3, %logical_and_3), kwargs = {}) # %logical_and_5 : [num_users=3] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_2, %logical_and_4), kwargs = {}) # %convert_element_type_5 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%floor_1, torch.int64), kwargs = {}) # %full_default_4 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_5 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_5, %convert_element_type_5, %full_default_4), kwargs = {}) # %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_4, torch.int64), kwargs = {}) # %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_4 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_5, %convert_element_type_4, %full_default_3), kwargs = {}) # %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%_unsafe_index_1, [%view_1, %view_2, %where_5, %where_4]), kwargs = {}) # %sub_4 : [num_users=1] = 
call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %floor), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_3), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %sub_5), kwargs = {}) # %full_default_5 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_5, %mul_6, %full_default_5), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index_1, %where_6), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_9, %mul_10), kwargs = {}) # %ge_4 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%floor, 0), kwargs = {}) # %lt_5 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%floor, 12), kwargs = {}) # %ge_5 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add_5, 0), kwargs = {}) # %lt_6 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%add_5, 12), kwargs = {}) # %logical_and_6 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_5, %lt_6), kwargs = {}) # %logical_and_7 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%lt_5, %logical_and_6), kwargs = {}) # %logical_and_8 : [num_users=3] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_4, %logical_and_7), kwargs = {}) # %convert_element_type_7 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_5, torch.int64), kwargs = {}) # %full_default_7 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_8 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_8, %convert_element_type_7, %full_default_7), kwargs = {}) # %convert_element_type_6 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%floor, torch.int64), kwargs = {}) # %full_default_6 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_7 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_8, %convert_element_type_6, %full_default_6), kwargs = {}) # %index_2 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%_unsafe_index_1, [%view_1, %view_2, %where_8, %where_7]), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %add_2), kwargs = {}) # %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %floor_1), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %sub_7), kwargs = {}) # %full_default_8 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_9 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_8, %mul_7, %full_default_8), kwargs = {}) # 
%mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index_2, %where_9), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %mul_11), kwargs = {}) # %ge_6 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add_4, 0), kwargs = {}) # %lt_7 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%add_4, 12), kwargs = {}) # %ge_7 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add_5, 0), kwargs = {}) # %lt_8 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%add_5, 12), kwargs = {}) # %logical_and_9 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_7, %lt_8), kwargs = {}) # %logical_and_10 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%lt_7, %logical_and_9), kwargs = {}) # %logical_and_11 : [num_users=3] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_6, %logical_and_10), kwargs = {}) # %convert_element_type_9 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_5, torch.int64), kwargs = {}) # %full_default_10 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_11 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_11, %convert_element_type_9, %full_default_10), kwargs = {}) # %convert_element_type_8 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_4, torch.int64), kwargs = {}) # %full_default_9 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_10 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_11, %convert_element_type_8, %full_default_9), kwargs = {}) # %index_3 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%_unsafe_index_1, [%view_1, %view_2, %where_11, %where_10]), kwargs = {}) # %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %floor), kwargs = {}) # %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %floor_1), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_8, %sub_9), kwargs = {}) # %full_default_11 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_12 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_11, %mul_8, %full_default_11), kwargs = {}) # %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index_3, %where_12), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %mul_12), kwargs = {}) triton_poi_fused_grid_sampler_2d_replication_pad2d_1 = async_compile.triton('triton_poi_fused_grid_sampler_2d_replication_pad2d_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_grid_sampler_2d_replication_pad2d_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_grid_sampler_2d_replication_pad2d_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = (xindex // 64) x5 = xindex x3 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (2*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (2*x2), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (1 + (2*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (1 + (2*x2)), xmask, eviction_policy='evict_last') tmp2 = 0.16666666666666666 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp5 = 6.0 tmp6 = tmp4 * tmp5 tmp7 = 5.5 tmp8 = tmp6 + tmp7 tmp9 = libdevice.floor(tmp8) tmp10 = 0.0 tmp11 = tmp9 >= tmp10 tmp12 = 12.0 tmp13 = tmp9 < tmp12 tmp16 = tmp15 * tmp2 tmp17 = tmp14 + tmp16 tmp18 = tmp17 * tmp5 tmp19 = tmp18 + tmp7 tmp20 = libdevice.floor(tmp19) tmp21 = tmp20 >= tmp10 tmp22 = tmp20 < tmp12 tmp23 = tmp21 & tmp22 tmp24 = tmp13 & tmp23 tmp25 = tmp11 & tmp24 tmp26 = 1.0 tmp27 = tmp9 + tmp26 tmp28 = tmp27 - tmp8 tmp29 = tmp20 + tmp26 tmp30 = tmp29 - tmp19 tmp31 = tmp28 * tmp30 tmp32 = tl.where(tmp25, tmp31, tmp10) tmp33 = tmp27 >= tmp10 tmp34 = tmp27 < tmp12 tmp35 = tmp34 & tmp23 tmp36 = tmp33 & tmp35 tmp37 = tmp8 - tmp9 tmp38 = tmp37 * tmp30 tmp39 = tl.where(tmp36, tmp38, tmp10) tmp40 = tmp29 >= tmp10 tmp41 = tmp29 < tmp12 tmp42 = tmp40 & tmp41 tmp43 = tmp34 & tmp42 tmp44 = tmp33 & tmp43 tmp45 = tmp19 - tmp20 tmp46 = tmp37 * tmp45 tmp47 = tl.where(tmp44, tmp46, tmp10) tmp48 = tmp13 & tmp42 tmp49 = tmp11 & tmp48 tmp50 = tmp28 * tmp45 tmp51 = tl.where(tmp49, tmp50, tmp10) tmp52 = tmp20.to(tl.int64) tmp53 = tl.full([1], 0, tl.int64) tmp54 = tl.where(tmp25, tmp52, tmp53) tmp55 = tl.full([XBLOCK], 12, tl.int32) tmp56 = tmp54 + tmp55 tmp57 = tmp54 < 0 tmp58 = tl.where(tmp57, tmp56, tmp54) tl.device_assert(((0 <= tmp58) & (tmp58 < 12)) | ~(xmask), "index out of bounds: 0 <= tmp58 < 12") tmp60 = tmp9.to(tl.int64) tmp61 = tl.where(tmp25, tmp60, tmp53) tmp62 = tmp61 + tmp55 tmp63 = tmp61 < 0 tmp64 = tl.where(tmp63, tmp62, tmp61) tl.device_assert(((0 <= tmp64) & (tmp64 < 12)) | ~(xmask), "index out of bounds: 0 <= tmp64 < 12") tmp66 = tl.load(in_ptr2 + ((4*((3) * ((3) <= (((0) * ((0) >= ((-4) + tmp58)) + ((-4) + tmp58) * (((-4) + 
tmp58) > (0))))) + (((0) * ((0) >= ((-4) + tmp58)) + ((-4) + tmp58) * (((-4) + tmp58) > (0)))) * ((((0) * ((0) >= ((-4) + tmp58)) + ((-4) + tmp58) * (((-4) + tmp58) > (0)))) < (3)))) + (16*x3) + ((3) * ((3) <= (((0) * ((0) >= ((-4) + tmp64)) + ((-4) + tmp64) * (((-4) + tmp64) > (0))))) + (((0) * ((0) >= ((-4) + tmp64)) + ((-4) + tmp64) * (((-4) + tmp64) > (0)))) * ((((0) * ((0) >= ((-4) + tmp64)) + ((-4) + tmp64) * (((-4) + tmp64) > (0)))) < (3)))), xmask, eviction_policy='evict_last') tmp67 = tl.where(tmp36, tmp52, tmp53) tmp68 = tmp27.to(tl.int64) tmp69 = tl.where(tmp36, tmp68, tmp53) tmp70 = tmp29.to(tl.int64) tmp71 = tl.where(tmp49, tmp70, tmp53) tmp72 = tl.where(tmp49, tmp60, tmp53) tmp73 = tmp66 * tmp32 tmp74 = tmp67 + tmp55 tmp75 = tmp67 < 0 tmp76 = tl.where(tmp75, tmp74, tmp67) tl.device_assert(((0 <= tmp76) & (tmp76 < 12)) | ~(xmask), "index out of bounds: 0 <= tmp76 < 12") tmp78 = tmp69 + tmp55 tmp79 = tmp69 < 0 tmp80 = tl.where(tmp79, tmp78, tmp69) tl.device_assert(((0 <= tmp80) & (tmp80 < 12)) | ~(xmask), "index out of bounds: 0 <= tmp80 < 12") tmp82 = tl.load(in_ptr2 + ((4*((3) * ((3) <= (((0) * ((0) >= ((-4) + tmp76)) + ((-4) + tmp76) * (((-4) + tmp76) > (0))))) + (((0) * ((0) >= ((-4) + tmp76)) + ((-4) + tmp76) * (((-4) + tmp76) > (0)))) * ((((0) * ((0) >= ((-4) + tmp76)) + ((-4) + tmp76) * (((-4) + tmp76) > (0)))) < (3)))) + (16*x3) + ((3) * ((3) <= (((0) * ((0) >= ((-4) + tmp80)) + ((-4) + tmp80) * (((-4) + tmp80) > (0))))) + (((0) * ((0) >= ((-4) + tmp80)) + ((-4) + tmp80) * (((-4) + tmp80) > (0)))) * ((((0) * ((0) >= ((-4) + tmp80)) + ((-4) + tmp80) * (((-4) + tmp80) > (0)))) < (3)))), xmask, eviction_policy='evict_last') tmp83 = tmp82 * tmp39 tmp84 = tmp73 + tmp83 tmp85 = tmp71 + tmp55 tmp86 = tmp71 < 0 tmp87 = tl.where(tmp86, tmp85, tmp71) tl.device_assert(((0 <= tmp87) & (tmp87 < 12)) | ~(xmask), "index out of bounds: 0 <= tmp87 < 12") tmp89 = tmp72 + tmp55 tmp90 = tmp72 < 0 tmp91 = tl.where(tmp90, tmp89, tmp72) tl.device_assert(((0 <= tmp91) & (tmp91 < 12)) | ~(xmask), "index out of bounds: 0 <= tmp91 < 12") tmp93 = tl.load(in_ptr2 + ((4*((3) * ((3) <= (((0) * ((0) >= ((-4) + tmp87)) + ((-4) + tmp87) * (((-4) + tmp87) > (0))))) + (((0) * ((0) >= ((-4) + tmp87)) + ((-4) + tmp87) * (((-4) + tmp87) > (0)))) * ((((0) * ((0) >= ((-4) + tmp87)) + ((-4) + tmp87) * (((-4) + tmp87) > (0)))) < (3)))) + (16*x3) + ((3) * ((3) <= (((0) * ((0) >= ((-4) + tmp91)) + ((-4) + tmp91) * (((-4) + tmp91) > (0))))) + (((0) * ((0) >= ((-4) + tmp91)) + ((-4) + tmp91) * (((-4) + tmp91) > (0)))) * ((((0) * ((0) >= ((-4) + tmp91)) + ((-4) + tmp91) * (((-4) + tmp91) > (0)))) < (3)))), xmask, eviction_policy='evict_last') tmp94 = tmp93 * tmp51 tmp95 = tmp84 + tmp94 tmp96 = tl.where(tmp44, tmp70, tmp53) tmp97 = tl.where(tmp44, tmp68, tmp53) tmp98 = tmp96 + tmp55 tmp99 = tmp96 < 0 tmp100 = tl.where(tmp99, tmp98, tmp96) tl.device_assert(((0 <= tmp100) & (tmp100 < 12)) | ~(xmask), "index out of bounds: 0 <= tmp100 < 12") tmp102 = tmp97 + tmp55 tmp103 = tmp97 < 0 tmp104 = tl.where(tmp103, tmp102, tmp97) tl.device_assert(((0 <= tmp104) & (tmp104 < 12)) | ~(xmask), "index out of bounds: 0 <= tmp104 < 12") tmp106 = tl.load(in_ptr2 + ((4*((3) * ((3) <= (((0) * ((0) >= ((-4) + tmp100)) + ((-4) + tmp100) * (((-4) + tmp100) > (0))))) + (((0) * ((0) >= ((-4) + tmp100)) + ((-4) + tmp100) * (((-4) + tmp100) > (0)))) * ((((0) * ((0) >= ((-4) + tmp100)) + ((-4) + tmp100) * (((-4) + tmp100) > (0)))) < (3)))) + (16*x3) + ((3) * ((3) <= (((0) * ((0) >= ((-4) + tmp104)) + ((-4) + tmp104) * (((-4) + tmp104) > (0))))) 
+ (((0) * ((0) >= ((-4) + tmp104)) + ((-4) + tmp104) * (((-4) + tmp104) > (0)))) * ((((0) * ((0) >= ((-4) + tmp104)) + ((-4) + tmp104) * (((-4) + tmp104) > (0)))) < (3)))), xmask, eviction_policy='evict_last') tmp107 = tmp106 * tmp47 tmp108 = tmp95 + tmp107 tl.store(in_out_ptr0 + (x5), tmp108, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [base_grid], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(buf0, 32, grid=grid(32), stream=stream0) # Topologically Sorted Source Nodes: [shift], Original ATen: [aten.randint] buf1 = torch.ops.aten.randint.low(0, 9, [4, 1, 1, 2], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf2 = buf1 del buf1 buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf11 = buf10; del buf10 # reuse buf15 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [x_1, grid_sample], Original ATen: [aten.replication_pad2d, aten.grid_sampler_2d] triton_poi_fused_grid_sampler_2d_replication_pad2d_1.run(buf15, buf0, buf2, arg0_1, 256, grid=grid(256), stream=stream0) del arg0_1 del buf0 del buf2 return (buf15, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
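The constants baked into triton_poi_fused_cat_0 above (0.16666..., -0.91666..., 0.91666...) are the step and endpoints of the sampling grid for pad=4 on 4x4 inputs: a 12-point linspace over (-1, 1) with a half-pixel inset of 1/12. A minimal eager sketch of the (4, 4, 2) base grid that kernel materializes, assuming those shapes:

import torch

h, pad = 4, 4
eps = 1.0 / (h + 2 * pad)                        # 1/12 half-pixel inset
arange = torch.linspace(-1.0 + eps, 1.0 - eps, h + 2 * pad)[:h]
arange = arange.unsqueeze(0).repeat(h, 1).unsqueeze(2)
base_grid = torch.cat([arange, arange.transpose(1, 0)], dim=2)
print(base_grid.shape)                           # torch.Size([4, 4, 2])
print(arange[0, :, 0])                           # starts at -11/12, step 1/6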
import torch
import torch.nn as nn
import torch.nn.functional as F


class RandomShiftsAug(nn.Module):

    def __init__(self, pad):
        super().__init__()
        self.pad = pad

    def forward(self, x):
        x = x.float()
        n, _c, h, w = x.size()
        assert h == w
        padding = tuple([self.pad] * 4)
        x = F.pad(x, padding, 'replicate')
        eps = 1.0 / (h + 2 * self.pad)
        arange = torch.linspace(-1.0 + eps, 1.0 - eps, h + 2 * self.pad,
            device=x.device, dtype=x.dtype)[:h]
        arange = arange.unsqueeze(0).repeat(h, 1).unsqueeze(2)
        base_grid = torch.cat([arange, arange.transpose(1, 0)], dim=2)
        base_grid = base_grid.unsqueeze(0).repeat(n, 1, 1, 1)
        shift = torch.randint(0, 2 * self.pad + 1, size=(n, 1, 1, 2),
            device=x.device, dtype=x.dtype)
        shift *= 2.0 / (h + 2 * self.pad)
        grid = base_grid + shift
        return F.grid_sample(x, grid, padding_mode='zeros',
            align_corners=False)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'pad': 4}]
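A hypothetical usage sketch of the module above, with pad=4 on 84x84 frames (a common setting in pixel-based RL); the output keeps the input shape, with each image shifted by up to pad pixels against replicate-padded borders:

import torch

aug = RandomShiftsAug(pad=4)
frames = torch.rand(8, 3, 84, 84)  # square inputs required (h == w)
shifted = aug(frames)
print(shifted.shape)               # torch.Size([8, 3, 84, 84])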
import torch
from torch import device
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_cat_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2
    x1 = xindex // 2 % 4
    x2 = xindex // 8
    x4 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = x1
    tmp6 = tmp5.to(tl.float32)
    tmp7 = 6.0
    tmp8 = tmp6 < tmp7
    tmp9 = 0.16666666666666666
    tmp10 = tmp6 * tmp9
    tmp11 = -0.9166666666666666
    tmp12 = tmp10 + tmp11
    tmp13 = 11 + -1 * x1
    tmp14 = tmp13.to(tl.float32)
    tmp15 = tmp14 * tmp9
    tmp16 = 0.9166666666666666
    tmp17 = tmp16 - tmp15
    tmp18 = tl.where(tmp8, tmp12, tmp17)
    tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
    tmp20 = tl.where(tmp4, tmp18, tmp19)
    tmp21 = tmp0 >= tmp3
    tl.full([1], 2, tl.int64)
    tmp24 = x2
    tmp25 = tmp24.to(tl.float32)
    tmp26 = tmp25 < tmp7
    tmp27 = tmp25 * tmp9
    tmp28 = tmp27 + tmp11
    tmp29 = 11 + -1 * x2
    tmp30 = tmp29.to(tl.float32)
    tmp31 = tmp30 * tmp9
    tmp32 = tmp16 - tmp31
    tmp33 = tl.where(tmp26, tmp28, tmp32)
    tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
    tmp35 = tl.where(tmp21, tmp33, tmp34)
    tmp36 = tl.where(tmp4, tmp20, tmp35)
    tl.store(out_ptr0 + x4, tmp36, xmask)


@triton.jit
def triton_poi_fused_grid_sampler_2d_replication_pad2d_1(in_out_ptr0,
    in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x2 = xindex // 64
    x5 = xindex
    x3 = xindex // 16
    tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 2 * x2, xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + (1 + 2 * x2), xmask, eviction_policy='evict_last')
    tmp2 = 0.16666666666666666
    tmp3 = tmp1 * tmp2
    tmp4 = tmp0 + tmp3
    tmp5 = 6.0
    tmp6 = tmp4 * tmp5
    tmp7 = 5.5
    tmp8 = tmp6 + tmp7
    tmp9 = libdevice.floor(tmp8)
    tmp10 = 0.0
    tmp11 = tmp9 >= tmp10
    tmp12 = 12.0
    tmp13 = tmp9 < tmp12
    tmp16 = tmp15 * tmp2
    tmp17 = tmp14 + tmp16
    tmp18 = tmp17 * tmp5
    tmp19 = tmp18 + tmp7
    tmp20 = libdevice.floor(tmp19)
    tmp21 = tmp20 >= tmp10
    tmp22 = tmp20 < tmp12
    tmp23 = tmp21 & tmp22
    tmp24 = tmp13 & tmp23
    tmp25 = tmp11 & tmp24
    tmp26 = 1.0
    tmp27 = tmp9 + tmp26
    tmp28 = tmp27 - tmp8
    tmp29 = tmp20 + tmp26
    tmp30 = tmp29 - tmp19
    tmp31 = tmp28 * tmp30
    tmp32 = tl.where(tmp25, tmp31, tmp10)
    tmp33 = tmp27 >= tmp10
    tmp34 = tmp27 < tmp12
    tmp35 = tmp34 & tmp23
    tmp36 = tmp33 & tmp35
    tmp37 = tmp8 - tmp9
    tmp38 = tmp37 * tmp30
    tmp39 = tl.where(tmp36, tmp38, tmp10)
    tmp40 = tmp29 >= tmp10
    tmp41 = tmp29 < tmp12
    tmp42 = tmp40 & tmp41
    tmp43 = tmp34 & tmp42
    tmp44 = tmp33 & tmp43
    tmp45 = tmp19 - tmp20
    tmp46 = tmp37 * tmp45
    tmp47 = tl.where(tmp44, tmp46, tmp10)
    tmp48 = tmp13 & tmp42
    tmp49 = tmp11 & tmp48
    tmp50 = tmp28 * tmp45
    tmp51 = tl.where(tmp49, tmp50, tmp10)
    tmp52 = tmp20.to(tl.int64)
    tmp53 = tl.full([1], 0, tl.int64)
    tmp54 = tl.where(tmp25, tmp52, tmp53)
    tmp55 = tl.full([XBLOCK], 12, tl.int32)
    tmp56 = tmp54 + tmp55
    tmp57 = tmp54 < 0
    tmp58 = tl.where(tmp57, tmp56, tmp54)
    tl.device_assert((0 <= tmp58) & (tmp58 < 12) | ~xmask, 'index out of bounds: 0 <= tmp58 < 12')
    tmp60 = tmp9.to(tl.int64)
    tmp61 = tl.where(tmp25, tmp60, tmp53)
    tmp62 = tmp61 + tmp55
    tmp63 = tmp61 < 0
    tmp64 = tl.where(tmp63, tmp62, tmp61)
    tl.device_assert((0 <= tmp64) & (tmp64 < 12) | ~xmask, 'index out of bounds: 0 <= tmp64 < 12')
    tmp66 = tl.load(in_ptr2 + (4 * (3 * (3 <= 0 * (0 >= -4 + tmp58) + (-4 + tmp58) * (-4 + tmp58 > 0)) + (0 * (0 >= -4 + tmp58) + (-4 + tmp58) * (-4 + tmp58 > 0)) * (0 * (0 >= -4 + tmp58) + (-4 + tmp58) * (-4 + tmp58 > 0) < 3)) + 16 * x3 + (3 * (3 <= 0 * (0 >= -4 + tmp64) + (-4 + tmp64) * (-4 + tmp64 > 0)) + (0 * (0 >= -4 + tmp64) + (-4 + tmp64) * (-4 + tmp64 > 0)) * (0 * (0 >= -4 + tmp64) + (-4 + tmp64) * (-4 + tmp64 > 0) < 3))), xmask, eviction_policy='evict_last')
    tmp67 = tl.where(tmp36, tmp52, tmp53)
    tmp68 = tmp27.to(tl.int64)
    tmp69 = tl.where(tmp36, tmp68, tmp53)
    tmp70 = tmp29.to(tl.int64)
    tmp71 = tl.where(tmp49, tmp70, tmp53)
    tmp72 = tl.where(tmp49, tmp60, tmp53)
    tmp73 = tmp66 * tmp32
    tmp74 = tmp67 + tmp55
    tmp75 = tmp67 < 0
    tmp76 = tl.where(tmp75, tmp74, tmp67)
    tl.device_assert((0 <= tmp76) & (tmp76 < 12) | ~xmask, 'index out of bounds: 0 <= tmp76 < 12')
    tmp78 = tmp69 + tmp55
    tmp79 = tmp69 < 0
    tmp80 = tl.where(tmp79, tmp78, tmp69)
    tl.device_assert((0 <= tmp80) & (tmp80 < 12) | ~xmask, 'index out of bounds: 0 <= tmp80 < 12')
    tmp82 = tl.load(in_ptr2 + (4 * (3 * (3 <= 0 * (0 >= -4 + tmp76) + (-4 + tmp76) * (-4 + tmp76 > 0)) + (0 * (0 >= -4 + tmp76) + (-4 + tmp76) * (-4 + tmp76 > 0)) * (0 * (0 >= -4 + tmp76) + (-4 + tmp76) * (-4 + tmp76 > 0) < 3)) + 16 * x3 + (3 * (3 <= 0 * (0 >= -4 + tmp80) + (-4 + tmp80) * (-4 + tmp80 > 0)) + (0 * (0 >= -4 + tmp80) + (-4 + tmp80) * (-4 + tmp80 > 0)) * (0 * (0 >= -4 + tmp80) + (-4 + tmp80) * (-4 + tmp80 > 0) < 3))), xmask, eviction_policy='evict_last')
    tmp83 = tmp82 * tmp39
    tmp84 = tmp73 + tmp83
    tmp85 = tmp71 + tmp55
    tmp86 = tmp71 < 0
    tmp87 = tl.where(tmp86, tmp85, tmp71)
    tl.device_assert((0 <= tmp87) & (tmp87 < 12) | ~xmask, 'index out of bounds: 0 <= tmp87 < 12')
    tmp89 = tmp72 + tmp55
    tmp90 = tmp72 < 0
    tmp91 = tl.where(tmp90, tmp89, tmp72)
    tl.device_assert((0 <= tmp91) & (tmp91 < 12) | ~xmask, 'index out of bounds: 0 <= tmp91 < 12')
    tmp93 = tl.load(in_ptr2 + (4 * (3 * (3 <= 0 * (0 >= -4 + tmp87) + (-4 + tmp87) * (-4 + tmp87 > 0)) + (0 * (0 >= -4 + tmp87) + (-4 + tmp87) * (-4 + tmp87 > 0)) * (0 * (0 >= -4 + tmp87) + (-4 + tmp87) * (-4 + tmp87 > 0) < 3)) + 16 * x3 + (3 * (3 <= 0 * (0 >= -4 + tmp91) + (-4 + tmp91) * (-4 + tmp91 > 0)) + (0 * (0 >= -4 + tmp91) + (-4 + tmp91) * (-4 + tmp91 > 0)) * (0 * (0 >= -4 + tmp91) + (-4 + tmp91) * (-4 + tmp91 > 0) < 3))), xmask, eviction_policy='evict_last')
    tmp94 = tmp93 * tmp51
    tmp95 = tmp84 + tmp94
    tmp96 = tl.where(tmp44, tmp70, tmp53)
    tmp97 = tl.where(tmp44, tmp68, tmp53)
    tmp98 = tmp96 + tmp55
    tmp99 = tmp96 < 0
    tmp100 = tl.where(tmp99, tmp98, tmp96)
    tl.device_assert((0 <= tmp100) & (tmp100 < 12) | ~xmask, 'index out of bounds: 0 <= tmp100 < 12')
    tmp102 = tmp97 + tmp55
    tmp103 = tmp97 < 0
    tmp104 = tl.where(tmp103, tmp102, tmp97)
    tl.device_assert((0 <= tmp104) & (tmp104 < 12) | ~xmask, 'index out of bounds: 0 <= tmp104 < 12')
    tmp106 = tl.load(in_ptr2 + (4 * (3 * (3 <= 0 * (0 >= -4 + tmp100) + (-4 + tmp100) * (-4 + tmp100 > 0)) + (0 * (0 >= -4 + tmp100) + (-4 + tmp100) * (-4 + tmp100 > 0)) * (0 * (0 >= -4 + tmp100) + (-4 + tmp100) * (-4 + tmp100 > 0) < 3)) + 16 * x3 + (3 * (3 <= 0 * (0 >= -4 + tmp104) + (-4 + tmp104) * (-4 + tmp104 > 0)) + (0 * (0 >= -4 + tmp104) + (-4 + tmp104) * (-4 + tmp104 > 0)) * (0 * (0 >= -4 + tmp104) + (-4 + tmp104) * (-4 + tmp104 > 0) < 3))), xmask, eviction_policy='evict_last')
    tmp107 = tmp106 * tmp47
    tmp108 = tmp95 + tmp107
    tl.store(in_out_ptr0 + x5, tmp108, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(32)](buf0, 32, XBLOCK=32, num_warps=1,
            num_stages=1)
        buf1 = torch.ops.aten.randint.low(0, 9, [4, 1, 1, 2], dtype=torch.
            float32, device=device(type='cuda', index=0), pin_memory=False)
        buf2 = buf1
        del buf1
        buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf11 = buf10
        del buf10
        buf15 = buf11
        del buf11
        triton_poi_fused_grid_sampler_2d_replication_pad2d_1[grid(256)](buf15,
            buf0, buf2, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del buf0
        del buf2
    return buf15,


class RandomShiftsAugNew(nn.Module):

    def __init__(self, pad):
        super().__init__()
        self.pad = pad

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
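A hedged smoke test for the compiled wrapper above. The shift is drawn by aten.randint inside call(), so outputs are stochastic and only shape/dtype can be asserted; the generated kernels also hard-code pad=4 and a 4x4x4x4 input, and need a CUDA device plus a matching torch/triton install to run at all.

import torch

if torch.cuda.is_available():
    m = RandomShiftsAugNew(pad=4)
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = m(x)
    assert y.shape == x.shape and y.dtype == torch.float32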
MishaLaskin/url_benchmark
RandomShiftsAug
false
5,614
[ "MIT" ]
1
a81aed0a0aec3a7dad83d930e54d480f97cf535d
https://github.com/MishaLaskin/url_benchmark/tree/a81aed0a0aec3a7dad83d930e54d480f97cf535d
import torch
import torch.nn as nn
import torch.nn.functional as F


class Model(nn.Module):

    def __init__(self, pad):
        super().__init__()
        self.pad = pad

    def forward(self, x):
        x = x.float()
        n, _c, h, w = x.size()
        assert h == w
        padding = tuple([self.pad] * 4)
        x = F.pad(x, padding, 'replicate')
        eps = 1.0 / (h + 2 * self.pad)
        arange = torch.linspace(-1.0 + eps, 1.0 - eps, h + 2 * self.pad,
            device=x.device, dtype=x.dtype)[:h]
        arange = arange.unsqueeze(0).repeat(h, 1).unsqueeze(2)
        base_grid = torch.cat([arange, arange.transpose(1, 0)], dim=2)
        base_grid = base_grid.unsqueeze(0).repeat(n, 1, 1, 1)
        shift = torch.randint(0, 2 * self.pad + 1, size=(n, 1, 1, 2),
            device=x.device, dtype=x.dtype)
        shift *= 2.0 / (h + 2 * self.pad)
        grid = base_grid + shift
        return F.grid_sample(x, grid, padding_mode='zeros',
            align_corners=False)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
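The eps = 1 / (h + 2 * pad) offset places the linspace samples exactly on pixel centers under align_corners=False, so a shift fixed at its central value (pad pixels) reads the original image back out of the padded one. A small self-contained check of that property (sizes chosen freely here):

import torch
import torch.nn.functional as F

pad, h, n = 4, 8, 2
x = torch.rand(n, 3, h, h)
xp = F.pad(x, (pad,) * 4, 'replicate')
eps = 1.0 / (h + 2 * pad)
arange = torch.linspace(-1.0 + eps, 1.0 - eps, h + 2 * pad)[:h]
arange = arange.unsqueeze(0).repeat(h, 1).unsqueeze(2)
base_grid = torch.cat([arange, arange.transpose(1, 0)], dim=2)
base_grid = base_grid.unsqueeze(0).repeat(n, 1, 1, 1)
grid = base_grid + pad * (2.0 / (h + 2 * pad))  # the central shift value
out = F.grid_sample(xp, grid, padding_mode='zeros', align_corners=False)
assert torch.allclose(out, x, atol=1e-5)  # an exact read-back, up to rounding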
FeedForwardNetwork
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/v7/cv7humnywkkqhrumbeetegqlkretdwtkj5pcanrbgxrolupvobzt.py # Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, hid], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # add_1 => add_1 # hid => mul_3 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # pow_1 => pow_1 # tanh => tanh # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 3), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %mul_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.7978845608028654), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_2,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {}) triton_poi_fused_add_mul_pow_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_pow_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_tanh_0', 
'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp0 * tmp0 tmp4 = tmp3 * tmp0 tmp5 = 0.044715 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp8 = 0.7978845608028654 tmp9 = tmp7 * tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp2 * tmp12 tl.store(out_ptr0 + (x0), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, hid], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_pow_tanh_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.nn as nn


class GELU(nn.Module):

    def forward(self, x):
        return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 
            0.044715 * torch.pow(x, 3))))


class FeedForwardNetwork(nn.Module):

    def __init__(self, in_dim, hid_dim) ->None:
        super().__init__()
        self.lin1 = nn.Linear(in_dim, hid_dim)
        self.lin2 = nn.Linear(hid_dim, in_dim)
        self.gleu = GELU()
        self.dropout = nn.Dropout()

    def forward(self, inputs):
        hid = self.gleu(self.lin1(inputs))
        return self.lin2(self.dropout(hid))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_dim': 4, 'hid_dim': 4}]
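The constant 0.7978845608028654 that appears in the fused kernel below is sqrt(2/pi), i.e. this is the standard tanh approximation of GELU. A minimal sanity check against PyTorch's built-in variant (assumes a PyTorch version, 1.12 or later, where F.gelu accepts approximate='tanh'):

import math
import torch
import torch.nn.functional as F

x = torch.randn(1024)
approx = 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 *
    torch.pow(x, 3))))
assert math.isclose(math.sqrt(2 / math.pi), 0.7978845608028654, rel_tol=1e-12)
assert torch.allclose(approx, F.gelu(x, approximate='tanh'), atol=1e-5)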
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = tmp0 * tmp0
    tmp4 = tmp3 * tmp0
    tmp5 = 0.044715
    tmp6 = tmp4 * tmp5
    tmp7 = tmp0 + tmp6
    tmp8 = 0.7978845608028654
    tmp9 = tmp7 * tmp8
    tmp10 = libdevice.tanh(tmp9)
    tmp11 = 1.0
    tmp12 = tmp10 + tmp11
    tmp13 = tmp2 * tmp12
    tl.store(out_ptr0 + x0, tmp13, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_pow_tanh_0[grid(256)](buf0, buf1, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
    return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0,
        reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4)


class GELU(nn.Module):

    def forward(self, x):
        return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 
            0.044715 * torch.pow(x, 3))))


class FeedForwardNetworkNew(nn.Module):

    def __init__(self, in_dim, hid_dim) ->None:
        super().__init__()
        self.lin1 = nn.Linear(in_dim, hid_dim)
        self.lin2 = nn.Linear(hid_dim, in_dim)
        self.gleu = GELU()
        self.dropout = nn.Dropout()

    def forward(self, input_0):
        primals_1 = self.lin1.weight
        primals_2 = self.lin1.bias
        primals_4 = self.lin2.weight
        primals_5 = self.lin2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
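Note that the compiled graph contains no dropout op, which suggests the module was captured in eval mode (nn.Dropout is the identity there). Under that assumption, a hedged CUDA parity check between the eager and compiled classes defined above:

import torch

if torch.cuda.is_available():
    ref = FeedForwardNetwork(4, 4).cuda().eval()
    new = FeedForwardNetworkNew(4, 4).cuda().eval()
    new.load_state_dict(ref.state_dict())  # same weights for both paths
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(ref(x), new(x), atol=1e-5)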
MobtgZhang/MWMLNet
FeedForwardNetwork
false
5,615
[ "MIT" ]
1
125bb39935916b6b4be505c51cb6a04eb49b96d0
https://github.com/MobtgZhang/MWMLNet/tree/125bb39935916b6b4be505c51cb6a04eb49b96d0
import math
import torch
import torch.nn as nn


class GELU(nn.Module):

    def forward(self, x):
        return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 
            0.044715 * torch.pow(x, 3))))


class Model(nn.Module):

    def __init__(self, in_dim, hid_dim) ->None:
        super().__init__()
        self.lin1 = nn.Linear(in_dim, hid_dim)
        self.lin2 = nn.Linear(hid_dim, in_dim)
        self.gleu = GELU()
        self.dropout = nn.Dropout()

    def forward(self, inputs):
        hid = self.gleu(self.lin1(inputs))
        return self.lin2(self.dropout(hid))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
BERTNextSentence
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/yy/cyya3js6wt64vdji3sfisvrqyfvqxwkwqq5mzg5bqjl2crzjs4t3.py # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.clone] # Source node to ATen node mapping: # linear => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%select,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_4/inductor_cache/t6/ct6t4t4f6df2rswen66qmskrag4dcnuaoxkebk4pssna63yxl3v3.py # Topologically Sorted Source Nodes: [linear, log_softmax], Original ATen: [aten.add, aten._log_softmax] # Source node to ATen node mapping: # linear => add # log_softmax => amax, sub # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) triton_poi_fused__log_softmax_add_1 = async_compile.triton('triton_poi_fused__log_softmax_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_add_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_add_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 x1 = (xindex // 2) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (0)) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp7 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1)) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp6 = tmp3 + tmp5 tmp10 = tmp7 + tmp9 tmp11 = triton_helpers.maximum(tmp6, tmp10) tmp12 = tmp2 - tmp11 tl.store(out_ptr0 + (x2), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/jt/cjtlgs566wh4nqwf6wvcsracaovzta35wbm5vllgfuavp5ebkiq4.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => exp, log, sub_1, sum_1 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %log : [num_users=1] = 
call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 2) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp6 = tl_math.log(tmp5) tmp7 = tmp0 - tmp6 tl.store(out_ptr0 + (x2), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 4), (4, 1)) assert_size_stride(primals_3, (2, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0) del primals_1 buf1 = empty_strided_cuda((16, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [linear, log_softmax], Original ATen: [aten.add, aten._log_softmax] triton_poi_fused__log_softmax_add_1.run(buf1, primals_3, buf2, 32, grid=grid(32), stream=stream0) del primals_3 buf3 = reinterpret_tensor(buf1, (4, 4, 2), (8, 2, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [log_softmax], Original ATen: 
[aten._log_softmax] triton_poi_fused__log_softmax_2.run(buf2, buf3, 32, grid=grid(32), stream=stream0) del buf2 return (buf3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class BERTNextSentence(nn.Module):

    def __init__(self, hidden):
        super().__init__()
        self.linear = nn.Linear(hidden, 2)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, x):
        return self.softmax(self.linear(x[:, 0]))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden': 4}]
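A typical usage sketch, assuming the class above is in scope: x[:, 0] selects the hidden state of the first token (the [CLS] position in BERT), and the LogSoftmax output is meant to be paired with nn.NLLLoss for the two-class is-next prediction.

import torch
import torch.nn as nn

head = BERTNextSentence(hidden=4)
x = torch.rand(8, 16, 4)            # [batch, seq_len, hidden]
log_probs = head(x)                 # [batch, 2]
labels = torch.randint(0, 2, (8,))
loss = nn.NLLLoss()(log_probs, labels)
assert torch.allclose(log_probs.exp().sum(-1), torch.ones(8))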
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tl.store(out_ptr0 + x2, tmp0, xmask)


@triton.jit
def triton_poi_fused__log_softmax_add_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 2
    x1 = xindex // 2
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + 0)
    tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
    tmp7 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + 1)
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
    tmp2 = tmp0 + tmp1
    tmp6 = tmp3 + tmp5
    tmp10 = tmp7 + tmp9
    tmp11 = triton_helpers.maximum(tmp6, tmp10)
    tmp12 = tmp2 - tmp11
    tl.store(out_ptr0 + x2, tmp12, xmask)


@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 2
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp6 = tl_math.log(tmp5)
    tmp7 = tmp0 - tmp6
    tl.store(out_ptr0 + x2, tmp7, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (2, 4), (4, 1))
    assert_size_stride(primals_3, (2,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf1)
        del primals_2
        buf2 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
        triton_poi_fused__log_softmax_add_1[grid(32)](buf1, primals_3, buf2,
            32, XBLOCK=32, num_warps=1, num_stages=1)
        del primals_3
        buf3 = reinterpret_tensor(buf1, (4, 4, 2), (8, 2, 1), 0)
        del buf1
        triton_poi_fused__log_softmax_2[grid(32)](buf2, buf3, 32, XBLOCK=32,
            num_warps=1, num_stages=1)
        del buf2
    return buf3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf3


class BERTNextSentenceNew(nn.Module):

    def __init__(self, hidden):
        super().__init__()
        self.linear = nn.Linear(hidden, 2)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, input_0):
        primals_2 = self.linear.weight
        primals_3 = self.linear.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
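The two _log_softmax kernels above implement the usual numerically stable form: subtract the per-row maximum first, then subtract log(sum(exp(.))). A hedged CUDA parity check between the eager head and the compiled wrapper, both defined earlier in this entry:

import torch

if torch.cuda.is_available():
    ref = BERTNextSentence(4).cuda()
    new = BERTNextSentenceNew(4).cuda()
    new.load_state_dict(ref.state_dict())  # share the linear layer's weights
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(ref(x), new(x), atol=1e-5)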
Moymix/BERT-pytorch
BERTNextSentence
false
5,616
[ "Apache-2.0" ]
1
f0b9c3ae53e05c00adcc761e0422e4222d8b5619
https://github.com/Moymix/BERT-pytorch/tree/f0b9c3ae53e05c00adcc761e0422e4222d8b5619
import torch
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, hidden):
        super().__init__()
        self.linear = nn.Linear(hidden, 2)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, x):
        return self.softmax(self.linear(x[:, 0]))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
Upsample2d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/s5/cs53ve6ff5smmuu5zxn6uwnvgsi32uuuvewmis3iy7oejdcvopvs.py # Topologically Sorted Source Nodes: [y_5], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # y_5 => constant_pad_nd_1 # Graph fragment: # %constant_pad_nd_1 : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%view_2, [0, 0, 1, 0, 1, 0, 0, 0], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 72) % 2 x1 = (xindex // 8) % 9 x0 = xindex % 8 x3 = (xindex // 144) x7 = xindex tmp0 = (-1) + x2 tmp1 = tl.full([1], 0, 
tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = (-1) + x1 tmp4 = tmp3 >= tmp1 tmp5 = tmp2 & tmp4 tmp6 = (-1) + (((-1) + x1) % 2) tmp7 = tmp6 >= tmp1 tmp8 = (-1) + (x0 % 2) tmp9 = tmp8 >= tmp1 tmp10 = tmp7 & tmp9 tmp11 = tmp10 & tmp5 tmp12 = tl.load(in_ptr0 + ((4*((((-1) + x1) // 2) % 4)) + (16*x3) + (x0 // 2)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp5, tmp12, tmp13) tl.store(out_ptr0 + (x7), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/qb/cqbibvks2cjhauc3nur2g3ih7ql3ix5qhuh7srxpiit2lwdgiiiw.py # Topologically Sorted Source Nodes: [y_9], Original ATen: [aten.convolution] # Source node to ATen node mapping: # y_9 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_3, %arg1_1, None, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128, 8], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 128 xnumel = 8 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 8 y1 = (yindex // 8) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (8*x2) + (144*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (8*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/zh/czh33meqtkkfeqw325p6bix3tuaq4yza5hjchatomlwpg3z2e2yu.py # Topologically Sorted Source Nodes: [y_11], Original ATen: [aten._unsafe_index] # Source node to ATen node mapping: # y_11 => _unsafe_index # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_4, [None, None, %unsqueeze, %convert_element_type_3]), kwargs = {}) triton_poi_fused__unsafe_index_2 = 
async_compile.triton('triton_poi_fused__unsafe_index_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 8) % 8 x0 = xindex % 8 x2 = (xindex // 64) x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.625 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + (5*tmp4) + (25*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x4), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (1, 1, 4, 4), (16, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 2, 9, 8), (144, 72, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [y_5], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(arg0_1, buf0, 2304, grid=grid(2304), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((16, 1, 8, 8), (64, 1, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [y_9], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf0, buf1, 128, 8, grid=grid(128, 8), stream=stream0) del buf0 # Topologically Sorted Source Nodes: [y_9], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, arg1_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (16, 1, 5, 5), (25, 1, 5, 1)) del arg1_1 buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [y_11], Original ATen: [aten._unsafe_index] triton_poi_fused__unsafe_index_2.run(buf2, buf3, 1024, grid=grid(1024), stream=stream0) del buf2 return (reinterpret_tensor(buf3, (4, 4, 8, 8), (256, 64, 8, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from 
torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((1, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F


def _setup_kernel(k):
    k = np.asarray(k, dtype=np.float32)
    if k.ndim == 1:
        k = np.outer(k, k)
    k /= np.sum(k)
    assert k.ndim == 2
    assert k.shape[0] == k.shape[1]
    return k


class Upsample2d(nn.Module):

    def __init__(self, opts, k=[1, 3, 3, 1], factor=2, down=1, gain=1):
        """
        Upsample2d method in G_synthesis_stylegan2.

        :param k: FIR filter of the shape `[firH, firW]` or `[firN]`
            (separable). The default is `[1] * factor`, which corresponds
            to average pooling.
        :param factor: Integer upsampling factor (default: 2).
        :param gain: Scaling factor for signal magnitude (default: 1.0).

        Returns: Tensor of the shape `[N, C, H * factor, W * factor]`
        """
        super().__init__()
        assert isinstance(factor, int) and factor >= 1, 'factor must be larger than 1! (default: 2)'
        self.gain = gain
        self.factor = factor
        self.opts = opts
        self.k = _setup_kernel(k) * (self.gain * factor ** 2)
        self.k = torch.FloatTensor(self.k).unsqueeze(0).unsqueeze(0)
        self.k = nn.Parameter(self.k, requires_grad=False)
        self.p = self.k.shape[0] - self.factor
        self.padx0, self.pady0 = (self.p + 1) // 2 + factor - 1, (self.p + 1
            ) // 2 + factor - 1
        self.padx1, self.pady1 = self.p // 2, self.p // 2
        self.kernelH, self.kernelW = self.k.shape[2:]
        self.down = down

    def forward(self, x):
        y = x.clone()
        y = y.reshape([-1, x.shape[2], x.shape[3], 1])
        inC, inH, inW = x.shape[1:]
        y = torch.reshape(y, (-1, inH, 1, inW, 1, 1))
        y = F.pad(y, (0, 0, self.factor - 1, 0, 0, 0, self.factor - 1, 0,
            0, 0, 0, 0))
        y = torch.reshape(y, (-1, 1, inH * self.factor, inW * self.factor))
        y = F.pad(y, (0, 0, max(self.pady0, 0), max(self.pady1, 0), max(
            self.padx0, 0), max(self.padx1, 0), 0, 0))
        y = y[:, max(-self.pady0, 0):y.shape[1] - max(-self.pady1, 0), max(
            -self.padx0, 0):y.shape[2] - max(-self.padx1, 0), :]
        y = y.permute(0, 3, 1, 2)
        y = y.reshape(-1, 1, inH * self.factor + self.pady0 + self.pady1,
            inW * self.factor + self.padx0 + self.padx1)
        y = F.conv2d(y, self.k)
        y = y.view(-1, 1, inH * self.factor + self.pady0 + self.pady1 -
            self.kernelH + 1, inW * self.factor + self.padx0 + self.padx1 -
            self.kernelW + 1)
        if inH * self.factor != y.shape[1]:
            y = F.interpolate(y, size=(inH * self.factor, inW * self.factor))
        y = y.permute(0, 2, 3, 1)
        y = y.reshape(-1, inC, inH * self.factor, inW * self.factor)
        return y


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'opts': _mock_config()}]
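A short check of the filter construction (it mirrors _setup_kernel above): [1, 3, 3, 1] becomes a normalized 4x4 outer product, and the module then rescales it by gain * factor ** 2 so that the filter's mass compensates for the zeros inserted during upsampling.

import numpy as np

k = np.asarray([1, 3, 3, 1], dtype=np.float32)
k = np.outer(k, k)
k /= np.sum(k)
assert k.shape == (4, 4) and abs(k.sum() - 1.0) < 1e-6
k_scaled = k * (1 * 2 ** 2)  # gain=1, factor=2, as in the defaults above
assert abs(k_scaled.sum() - 4.0) < 1e-5  # one in four upsampled samples is nonzero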
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 2304
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex // 72 % 2
    x1 = xindex // 8 % 9
    x0 = xindex % 8
    x3 = xindex // 144
    x7 = xindex
    tmp0 = -1 + x2
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = -1 + x1
    tmp4 = tmp3 >= tmp1
    tmp5 = tmp2 & tmp4
    tmp6 = -1 + (-1 + x1) % 2
    tmp7 = tmp6 >= tmp1
    tmp8 = -1 + x0 % 2
    tmp9 = tmp8 >= tmp1
    tmp10 = tmp7 & tmp9
    tmp11 = tmp10 & tmp5
    tmp12 = tl.load(in_ptr0 + (4 * ((-1 + x1) // 2 % 4) + 16 * x3 + x0 // 2),
        tmp11 & xmask, eviction_policy='evict_last', other=0.0)
    tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
    tmp14 = tl.where(tmp5, tmp12, tmp13)
    tl.store(out_ptr0 + x7, tmp14, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 128
    xnumel = 8
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 8
    y1 = yindex // 8
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 8 * x2 + 144 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 8 * y3), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused__unsafe_index_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 8 % 8
    x0 = xindex % 8
    x2 = xindex // 64
    x4 = xindex
    tmp0 = x1
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.625
    tmp3 = tmp1 * tmp2
    tmp4 = tmp3.to(tl.int32)
    tmp5 = x0
    tmp6 = tmp5.to(tl.float32)
    tmp7 = tmp6 * tmp2
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tl.load(in_ptr0 + (tmp8 + 5 * tmp4 + 25 * x2), xmask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + x4, tmp9, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (1, 1, 4, 4), (16, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 2, 9, 8), (144, 72, 8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_constant_pad_nd_0[grid(2304)](arg0_1, buf0, 2304,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((16, 1, 8, 8), (64, 1, 8, 1), torch.float32)
        triton_poi_fused_convolution_1[grid(128, 8)](buf0, buf1, 128, 8,
            XBLOCK=8, YBLOCK=128, num_warps=4, num_stages=1)
        del buf0
        buf2 = extern_kernels.convolution(buf1, arg1_1, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (16, 1, 5, 5), (25, 1, 5, 1))
        del arg1_1
        buf3 = buf1
        del buf1
        triton_poi_fused__unsafe_index_2[grid(1024)](buf2, buf3, 1024,
            XBLOCK=256, num_warps=4, num_stages=1)
        del buf2
    return reinterpret_tensor(buf3, (4, 4, 8, 8), (256, 64, 8, 1), 0),


def _setup_kernel(k):
    k = np.asarray(k, dtype=np.float32)
    if k.ndim == 1:
        k = np.outer(k, k)
    k /= np.sum(k)
    assert k.ndim == 2
    assert k.shape[0] == k.shape[1]
    return k


class Upsample2dNew(nn.Module):

    def __init__(self, opts, k=[1, 3, 3, 1], factor=2, down=1, gain=1):
        """
        Upsample2d method in G_synthesis_stylegan2.

        :param k: FIR filter of the shape `[firH, firW]` or `[firN]`
            (separable). The default is `[1] * factor`, which corresponds
            to average pooling.
        :param factor: Integer upsampling factor (default: 2).
        :param gain: Scaling factor for signal magnitude (default: 1.0).

        Returns: Tensor of the shape `[N, C, H * factor, W * factor]`
        """
        super().__init__()
        assert isinstance(factor, int) and factor >= 1, 'factor must be larger than 1! (default: 2)'
        self.gain = gain
        self.factor = factor
        self.opts = opts
        self.k = _setup_kernel(k) * (self.gain * factor ** 2)
        self.k = torch.FloatTensor(self.k).unsqueeze(0).unsqueeze(0)
        self.k = nn.Parameter(self.k, requires_grad=False)
        self.p = self.k.shape[0] - self.factor
        self.padx0, self.pady0 = (self.p + 1) // 2 + factor - 1, (self.p + 1
            ) // 2 + factor - 1
        self.padx1, self.pady1 = self.p // 2, self.p // 2
        self.kernelH, self.kernelW = self.k.shape[2:]
        self.down = down

    def forward(self, input_0):
        arg1_1 = self.k
        arg0_1 = input_0
        output = call([arg0_1, arg1_1])
        return output[0]
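One thing the trace makes visible: self.p is computed from self.k.shape[0], which is 1 after the two unsqueeze calls (the filter is stored as [1, 1, 4, 4]), so the valid convolution yields 5x5 here and the F.interpolate fallback (the _unsafe_index kernel with scale 0.625 = 5/8) restores the 8x8 target via nearest-neighbor lookup. A hedged CUDA parity check follows; opts is unused by forward, so any placeholder object should do.

import torch

if torch.cuda.is_available():
    ref = Upsample2d(opts=object()).cuda()
    new = Upsample2dNew(opts=object()).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out_ref, out_new = ref(x), new(x)
    assert out_ref.shape == out_new.shape == (4, 4, 8, 8)
    assert torch.allclose(out_ref, out_new, atol=1e-4)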
Iceland-Leo/StyleGAN2_PyTorch
Upsample2d
false
5,617
[ "MIT" ]
1
3621f5e4ba1c7fde7e2fae1f4700d050656a0b02
https://github.com/Iceland-Leo/StyleGAN2_PyTorch/tree/3621f5e4ba1c7fde7e2fae1f4700d050656a0b02
from _paritybench_helpers import _mock_config
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F


def _setup_kernel(k):
    k = np.asarray(k, dtype=np.float32)
    if k.ndim == 1:
        k = np.outer(k, k)
    k /= np.sum(k)
    assert k.ndim == 2
    assert k.shape[0] == k.shape[1]
    return k


class Model(nn.Module):

    def __init__(self, opts, k=[1, 3, 3, 1], factor=2, down=1, gain=1):
        """
        Upsample2d method in G_synthesis_stylegan2.

        :param k: FIR filter of the shape `[firH, firW]` or `[firN]`
            (separable). The default is `[1] * factor`, which corresponds
            to average pooling.
        :param factor: Integer upsampling factor (default: 2).
        :param gain: Scaling factor for signal magnitude (default: 1.0).

        Returns: Tensor of the shape `[N, C, H * factor, W * factor]`
        """
        super().__init__()
        assert isinstance(factor, int) and factor >= 1, 'factor must be larger than 1! (default: 2)'
        self.gain = gain
        self.factor = factor
        self.opts = opts
        self.k = _setup_kernel(k) * (self.gain * factor ** 2)
        self.k = torch.FloatTensor(self.k).unsqueeze(0).unsqueeze(0)
        self.k = nn.Parameter(self.k, requires_grad=False)
        self.p = self.k.shape[0] - self.factor
        self.padx0, self.pady0 = (self.p + 1) // 2 + factor - 1, (self.p + 1
            ) // 2 + factor - 1
        self.padx1, self.pady1 = self.p // 2, self.p // 2
        self.kernelH, self.kernelW = self.k.shape[2:]
        self.down = down

    def forward(self, x):
        y = x.clone()
        y = y.reshape([-1, x.shape[2], x.shape[3], 1])
        inC, inH, inW = x.shape[1:]
        y = torch.reshape(y, (-1, inH, 1, inW, 1, 1))
        y = F.pad(y, (0, 0, self.factor - 1, 0, 0, 0, self.factor - 1, 0,
            0, 0, 0, 0))
        y = torch.reshape(y, (-1, 1, inH * self.factor, inW * self.factor))
        y = F.pad(y, (0, 0, max(self.pady0, 0), max(self.pady1, 0), max(
            self.padx0, 0), max(self.padx1, 0), 0, 0))
        y = y[:, max(-self.pady0, 0):y.shape[1] - max(-self.pady1, 0), max(
            -self.padx0, 0):y.shape[2] - max(-self.padx1, 0), :]
        y = y.permute(0, 3, 1, 2)
        y = y.reshape(-1, 1, inH * self.factor + self.pady0 + self.pady1,
            inW * self.factor + self.padx0 + self.padx1)
        y = F.conv2d(y, self.k)
        y = y.view(-1, 1, inH * self.factor + self.pady0 + self.pady1 -
            self.kernelH + 1, inW * self.factor + self.padx0 + self.padx1 -
            self.kernelW + 1)
        if inH * self.factor != y.shape[1]:
            y = F.interpolate(y, size=(inH * self.factor, inW * self.factor))
        y = y.permute(0, 2, 3, 1)
        y = y.reshape(-1, inC, inH * self.factor, inW * self.factor)
        return y


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []