Dataset schema (column name, dtype, observed lengths/values):

| column | dtype | lengths / values |
|---|---|---|
| entry_point | string | 1 – 65 chars |
| original_triton_code | string | 4.5k – 619k chars |
| python_code | string | 208 – 60.9k chars |
| triton_code | string | 1.15k – 275k chars |
| repo_name | string | 7 – 115 chars |
| module_name | string | 1 – 65 chars |
| synthetic | bool | 1 class |
| uuid | int64 | 0 – 18.5k |
| licenses | sequence | 1 – 6 entries |
| stars | int64 | 0 – 19.8k |
| sha | string | 40 chars |
| repo_link | string | 72 – 180 chars |
| pytorch_code | string | 200 – 4.05k chars |

Sample row (long code fields shown in full below):
entry_point: ParityPonderGRU

original_triton_code:

# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/7e/c7edgnsiuilw7uzwau7radvkvvtmowm7d7uh56mczbhieiykfrnx.py
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.new_zeros]
# Source node to ATen node mapping:
# h => full_default
# Graph fragment:
# %full_default : [num_users=3] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_new_zeros_0 = async_compile.triton('triton_poi_fused_new_zeros_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_new_zeros_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
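# Note: triton_poi_fused_new_zeros_0 simply zero-fills the (4, 4) initial GRU
# hidden state `h`. The `xmask = xindex < xnumel` guard is Inductor's standard
# pointwise pattern: XBLOCK may be rounded up to a power of two, and the mask
# keeps out-of-range lanes from storing past the 16 valid elements.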
# kernel path: runs/run_shard_4/inductor_cache/zw/czwxbu6nuxhoujusu3krfetmqzx7rxloioah6gicy4ie2wmv6tqi.py
# Topologically Sorted Source Nodes: [stack_3], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# stack_3 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_8, %primals_8, %primals_8, %primals_8, %primals_6, %primals_6, %primals_6],), kwargs = {})
triton_poi_fused_stack_1 = async_compile.triton('triton_poi_fused_stack_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 28
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (x0), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (x0), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 4, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + (x0), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 5, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr1 + (x0), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp26 = tmp0 >= tmp22
tmp27 = tl.full([1], 6, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tmp26 & tmp28
tmp30 = tl.load(in_ptr1 + (x0), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp31 = tmp0 >= tmp27
tmp32 = tl.full([1], 7, tl.int64)
tmp33 = tmp0 < tmp32
tmp34 = tl.load(in_ptr1 + (x0), tmp31 & xmask, eviction_policy='evict_last', other=0.0)
tmp35 = tl.where(tmp29, tmp30, tmp34)
tmp36 = tl.where(tmp24, tmp25, tmp35)
tmp37 = tl.where(tmp19, tmp20, tmp36)
tmp38 = tl.where(tmp14, tmp15, tmp37)
tmp39 = tl.where(tmp9, tmp10, tmp38)
tmp40 = tl.where(tmp4, tmp5, tmp39)
tl.store(out_ptr0 + (x2), tmp40, xmask)
''', device_str='cuda')
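# Note: triton_poi_fused_stack_1 materializes the (7, 4) weight stack from the
# %cat fragment above: four copies of primals_8 followed by three copies of
# primals_6. Judging from how call() consumes the result, the first four rows
# drive the per-step output projections and the last three drive the per-step
# halting (lambda) projections; with max_steps = 4 the lambda head is only
# needed for steps 1-3, since the final step forces lambda_n = 1. The mapping
# of primals_8/primals_9 to output_layer and primals_6/primals_7 to
# lambda_layer is inferred from usage, not stated in the dump.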
# kernel path: runs/run_shard_4/inductor_cache/hq/chq2ihz43k5f3canufmvrzz3vy4seaa7ohwwzmv2o6szsqgwtn4r.py
# Topologically Sorted Source Nodes: [stack_4], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# stack_4 => cat_1
# Graph fragment:
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_9, %primals_9, %primals_9, %primals_9, %primals_7, %primals_7, %primals_7],), kwargs = {})
triton_poi_fused_stack_2 = async_compile.triton('triton_poi_fused_stack_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 7
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp5 = tl.load(in_ptr0 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp23 = tl.load(in_ptr1 + (0))
tmp24 = tl.broadcast_to(tmp23, [XBLOCK])
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp7 = tmp0 >= tmp3
tmp8 = tl.full([1], 2, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = tmp7 & tmp9
tmp11 = tmp0 >= tmp8
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tmp0 >= tmp12
tmp16 = tl.full([1], 4, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tmp15 & tmp17
tmp19 = tmp0 >= tmp16
tmp20 = tl.full([1], 5, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp25 = tmp0 >= tmp20
tmp26 = tl.full([1], 6, tl.int64)
tmp27 = tmp0 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp0 >= tmp26
tmp30 = tl.full([1], 7, tl.int64)
tmp31 = tmp0 < tmp30
tmp32 = tl.where(tmp28, tmp24, tmp24)
tmp33 = tl.where(tmp22, tmp24, tmp32)
tmp34 = tl.where(tmp18, tmp6, tmp33)
tmp35 = tl.where(tmp14, tmp6, tmp34)
tmp36 = tl.where(tmp10, tmp6, tmp35)
tmp37 = tl.where(tmp4, tmp6, tmp36)
tl.store(out_ptr0 + (x0), tmp37, xmask)
''', device_str='cuda')
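# Note: triton_poi_fused_stack_2 builds the matching (7,) bias stack: four
# copies of the scalar primals_9 followed by three copies of primals_7,
# aligned row-for-row with the weight stack from triton_poi_fused_stack_1.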
# kernel path: runs/run_shard_4/inductor_cache/g2/cg23xreycdv6fxtkdfop75dzuhabmjs3xhrvq22ytdld2qihmoyg.py
# Topologically Sorted Source Nodes: [stack_2], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# stack_2 => cat_2
# Graph fragment:
# %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %getitem_2, %getitem_4, %getitem_6, %getitem, %getitem_2, %getitem_4],), kwargs = {})
triton_poi_fused_stack_3 = async_compile.triton('triton_poi_fused_stack_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 112
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4*x1)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (4*((-4) + x1))), tmp9 & xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + (4*((-8) + x1))), tmp14 & xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr3 + (x0 + (4*((-12) + x1))), tmp19 & xmask, other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 20, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr0 + (x0 + (4*((-16) + x1))), tmp24 & xmask, other=0.0)
tmp26 = tmp0 >= tmp22
tmp27 = tl.full([1], 24, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tmp26 & tmp28
tmp30 = tl.load(in_ptr1 + (x0 + (4*((-20) + x1))), tmp29 & xmask, other=0.0)
tmp31 = tmp0 >= tmp27
tmp32 = tl.full([1], 28, tl.int64)
tmp33 = tmp0 < tmp32
tmp34 = tl.load(in_ptr2 + (x0 + (4*((-24) + x1))), tmp31 & xmask, other=0.0)
tmp35 = tl.where(tmp29, tmp30, tmp34)
tmp36 = tl.where(tmp24, tmp25, tmp35)
tmp37 = tl.where(tmp19, tmp20, tmp36)
tmp38 = tl.where(tmp14, tmp15, tmp37)
tmp39 = tl.where(tmp9, tmp10, tmp38)
tmp40 = tl.where(tmp4, tmp5, tmp39)
tl.store(out_ptr0 + (x2), tmp40, xmask)
''', device_str='cuda')
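# Note: triton_poi_fused_stack_3 packs the hidden states of the four unrolled
# GRU steps as [h1, h2, h3, h4, h1, h2, h3] into a (28, 4) buffer. Viewed as
# (7, 4, 4), a single batched bmm against the (7, 4, 1) weight stack then
# evaluates all seven linear projections (four output heads, three lambda
# heads) in one launch.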
# kernel path: runs/run_shard_4/inductor_cache/oo/coo3437vo4xsrwedl7lcrwfswym3k6uq6mzbtutpnydbupg5rgob.py
# Topologically Sorted Source Nodes: [p_m, sigmoid_2, sub_8, bernoulli_2, sigmoid_1, sub_4, bernoulli_1, sigmoid, un_halted_prob_1, p_n_1, un_halted_prob_2, p_n_2, p_n_3, bernoulli, halted_1, sub_2, mul_3, mul_4, p_m_1, halt_1, sub_6, mul_10, mul_11, p_m_2, halted_2, sub_9, halt_2, sub_10, mul_17, mul_18, p_m_3, halted_3, mul_20, mul_13, mul_6, y_m_1, mul_12, y_m_2, mul_19, y_m_3, lambda_n_3, bernoulli_3, sub_13, halt_3, sub_14, mul_24, mul_25, p_m_4, mul_26, mul_27, y_m_4], Original ATen: [aten.new_zeros, aten.sigmoid, aten.rsub, aten.bernoulli, aten.mul, aten.add, aten.new_ones]
# Source node to ATen node mapping:
# bernoulli => lt_2
# bernoulli_1 => convert_element_type_1, lt_1
# bernoulli_2 => convert_element_type, lt
# bernoulli_3 => convert_element_type_3, lt_3
# halt_1 => mul_10
# halt_2 => mul_13
# halt_3 => mul_23
# halted_1 => convert_element_type_2
# halted_2 => add_4
# halted_3 => add_6
# lambda_n_3 => full_default_6
# mul_10 => mul_11
# mul_11 => mul_12
# mul_12 => mul_19
# mul_13 => mul_17
# mul_17 => mul_14
# mul_18 => mul_15
# mul_19 => mul_20
# mul_20 => mul_16
# mul_24 => mul_24
# mul_25 => mul_25
# mul_26 => mul_26
# mul_27 => mul_27
# mul_3 => mul_7
# mul_4 => mul_8
# mul_6 => mul_18
# p_m => full_default_3
# p_m_1 => add_1
# p_m_2 => add_3
# p_m_3 => add_5
# p_m_4 => add_10
# p_n_1 => mul_2
# p_n_2 => mul_4
# p_n_3 => mul_5
# sigmoid => sigmoid_2
# sigmoid_1 => sigmoid_1
# sigmoid_2 => sigmoid
# sub_10 => sub_10
# sub_13 => sub_13
# sub_14 => sub_14
# sub_2 => sub_4
# sub_4 => sub_2
# sub_6 => sub_7
# sub_8 => sub_1
# sub_9 => sub_9
# un_halted_prob_1 => sub_3
# un_halted_prob_2 => mul_3
# y_m_1 => add_7
# y_m_2 => add_8
# y_m_3 => add_9
# y_m_4 => add_11
# Graph fragment:
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_14,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %select), kwargs = {})
# %lt : [num_users=2] = call_function[target=torch.ops.aten.lt.Tensor](args = (%rand, %select), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%lt, torch.float32), kwargs = {})
# %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_13,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %select_1), kwargs = {})
# %lt_1 : [num_users=2] = call_function[target=torch.ops.aten.lt.Tensor](args = (%rand_1, %select_1), kwargs = {})
# %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%lt_1, torch.float32), kwargs = {})
# %sigmoid_2 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_12,), kwargs = {})
# %sub_3 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %select_2), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %select_1), kwargs = {})
# %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %sub_2), kwargs = {})
# %mul_4 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %select), kwargs = {})
# %mul_5 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %sub_1), kwargs = {})
# %lt_2 : [num_users=2] = call_function[target=torch.ops.aten.lt.Tensor](args = (%rand_2, %select_2), kwargs = {})
# %convert_element_type_2 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%lt_2, torch.float32), kwargs = {})
# %sub_4 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %convert_element_type_2), kwargs = {})
# %mul_7 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%full_default_3, %sub_4), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_2, %convert_element_type_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %mul_8), kwargs = {})
# %mul_10 : [num_users=4] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_1, %sub_4), kwargs = {})
# %sub_7 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul_10), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %sub_7), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %mul_10), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_11, %mul_12), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_2, %mul_10), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %add_4), kwargs = {})
# %mul_13 : [num_users=4] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, %sub_9), kwargs = {})
# %sub_10 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul_13), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, %sub_10), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %mul_13), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_14, %mul_15), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_13), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_3, %mul_13), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_4, %mul_10), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_5, %convert_element_type_2), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %mul_18), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_7, %sub_7), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_19, %mul_17), kwargs = {})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_8, %sub_10), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_20, %mul_16), kwargs = {})
# %full_default_6 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %lt_3 : [num_users=2] = call_function[target=torch.ops.aten.lt.Tensor](args = (%rand_3, %full_default_6), kwargs = {})
# %convert_element_type_3 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%lt_3, torch.float32), kwargs = {})
# %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %add_6), kwargs = {})
# %mul_23 : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_3, %sub_13), kwargs = {})
# %sub_14 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul_23), kwargs = {})
# %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, %sub_14), kwargs = {})
# %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %mul_23), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_24, %mul_25), kwargs = {})
# %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_9, %sub_14), kwargs = {})
# %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_6, %mul_23), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_26, %mul_27), kwargs = {})
triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4 = async_compile.triton('triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*i1', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*i1', 13: '*i1', 14: '*i1', 15: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 18, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr1 + (4))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp5 = tl.load(in_ptr2 + (16 + x0), xmask)
tmp8 = tl.load(in_ptr1 + (5))
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp10 = tl.load(in_ptr2 + (20 + x0), xmask)
tmp13 = tl.load(in_ptr1 + (6))
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp15 = tl.load(in_ptr2 + (24 + x0), xmask)
tmp18 = tl.load(in_ptr3 + (x0), xmask)
tmp20 = tl.load(in_ptr4 + (x0), xmask)
tmp22 = tl.load(in_ptr5 + (x0), xmask)
tmp59 = tl.load(in_ptr1 + (0))
tmp60 = tl.broadcast_to(tmp59, [XBLOCK])
tmp61 = tl.load(in_ptr2 + (x0), xmask)
tmp66 = tl.load(in_ptr1 + (1))
tmp67 = tl.broadcast_to(tmp66, [XBLOCK])
tmp68 = tl.load(in_ptr2 + (4 + x0), xmask)
tmp73 = tl.load(in_ptr1 + (2))
tmp74 = tl.broadcast_to(tmp73, [XBLOCK])
tmp75 = tl.load(in_ptr2 + (8 + x0), xmask)
tmp80 = tl.load(in_ptr1 + (3))
tmp81 = tl.broadcast_to(tmp80, [XBLOCK])
tmp82 = tl.load(in_ptr2 + (12 + x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 < tmp1
tmp6 = tmp4 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp11 = tmp9 + tmp10
tmp12 = tl.sigmoid(tmp11)
tmp16 = tmp14 + tmp15
tmp17 = tl.sigmoid(tmp16)
tmp19 = tmp18 < tmp17
tmp21 = tmp20 < tmp12
tmp23 = tmp22 < tmp7
tmp24 = tmp23.to(tl.float32)
tmp25 = tmp1 - tmp24
tmp26 = 0.0
tmp27 = tmp26 * tmp25
tmp28 = tmp7 * tmp24
tmp29 = tmp27 + tmp28
tmp30 = tmp21.to(tl.float32)
tmp31 = tmp30 * tmp25
tmp32 = tmp1 - tmp31
tmp33 = tmp29 * tmp32
tmp34 = tmp1 - tmp7
tmp35 = tmp34 * tmp12
tmp36 = tmp35 * tmp31
tmp37 = tmp33 + tmp36
tmp38 = tmp19.to(tl.float32)
tmp39 = tmp24 + tmp31
tmp40 = tmp1 - tmp39
tmp41 = tmp38 * tmp40
tmp42 = tmp1 - tmp41
tmp43 = tmp37 * tmp42
tmp44 = tmp1 - tmp12
tmp45 = tmp34 * tmp44
tmp46 = tmp45 * tmp17
tmp47 = tmp46 * tmp41
tmp48 = tmp43 + tmp47
tmp49 = tmp2.to(tl.float32)
tmp50 = tmp39 + tmp41
tmp51 = tmp1 - tmp50
tmp52 = tmp49 * tmp51
tmp53 = tmp1 - tmp52
tmp54 = tmp48 * tmp53
tmp55 = tmp1 - tmp17
tmp56 = tmp45 * tmp55
tmp57 = tmp56 * tmp52
tmp58 = tmp54 + tmp57
tmp62 = tmp60 + tmp61
tmp63 = tmp62 * tmp24
tmp64 = tmp27 + tmp63
tmp65 = tmp64 * tmp32
tmp69 = tmp67 + tmp68
tmp70 = tmp69 * tmp31
tmp71 = tmp65 + tmp70
tmp72 = tmp71 * tmp42
tmp76 = tmp74 + tmp75
tmp77 = tmp76 * tmp41
tmp78 = tmp72 + tmp77
tmp79 = tmp78 * tmp53
tmp83 = tmp81 + tmp82
tmp84 = tmp83 * tmp52
tmp85 = tmp79 + tmp84
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp7, xmask)
tl.store(out_ptr2 + (x0), tmp12, xmask)
tl.store(out_ptr3 + (x0), tmp17, xmask)
tl.store(out_ptr4 + (x0), tmp19, xmask)
tl.store(out_ptr5 + (x0), tmp21, xmask)
tl.store(out_ptr6 + (x0), tmp23, xmask)
tl.store(in_out_ptr0 + (x0), tmp58, xmask)
tl.store(in_out_ptr1 + (x0), tmp85, xmask)
''', device_str='cuda')
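# Note: this fused kernel is the entire PonderNet halting recurrence for the
# four unrolled steps, applied to a batch of 4:
#   - tmp7, tmp12, tmp17 are sigmoid(lambda logit) for steps 1-3 (rows 4-6 of
#     the bmm output plus the lambda bias); step 4 uses lambda = 1.
#   - torch.bernoulli(p) is lowered to (rand < p): tmp23/tmp21/tmp19 for
#     steps 1-3, and tmp2 = rand < 1.0 (always true) for the forced final halt.
#   - tmp39 and tmp50 accumulate `halted`, so each step's effective halt is
#     masked by (1 - halted), exactly as in the Python loop further below.
#   - in_out_ptr0 (tmp58) and in_out_ptr1 (tmp85) carry the running updates
#     p_m = p_m * (1 - halt) + p_n * halt and the analogous y_m recurrence,
#     unrolled four times.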
# kernel path: runs/run_shard_4/inductor_cache/jy/cjys3iopkrrusczes7dimlrpecwecndhklisdis5ynngdb4dpi3f.py
# Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# stack => cat_3
# Graph fragment:
# %cat_3 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_2, %mul_2, %mul_4, %mul_5],), kwargs = {})
triton_poi_fused_stack_5 = async_compile.triton('triton_poi_fused_stack_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + ((-4) + x0), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = 1.0
tmp12 = tmp11 - tmp10
tmp13 = tl.load(in_ptr1 + ((-4) + x0), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp14 = tmp12 * tmp13
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp9, tmp14, tmp15)
tmp17 = tmp0 >= tmp7
tmp18 = tl.full([1], 12, tl.int64)
tmp19 = tmp0 < tmp18
tmp20 = tmp17 & tmp19
tmp21 = tl.load(in_ptr0 + ((-8) + x0), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tmp11 - tmp21
tmp23 = tl.load(in_ptr1 + ((-8) + x0), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp24 = tmp11 - tmp23
tmp25 = tmp22 * tmp24
tmp26 = tl.load(in_ptr2 + ((-8) + x0), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp27 = tmp25 * tmp26
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp20, tmp27, tmp28)
tmp30 = tmp0 >= tmp18
tmp31 = tl.full([1], 16, tl.int64)
tmp32 = tmp0 < tmp31
tmp33 = tl.load(in_ptr0 + ((-12) + x0), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp34 = tmp11 - tmp33
tmp35 = tl.load(in_ptr1 + ((-12) + x0), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tmp11 - tmp35
tmp37 = tmp34 * tmp36
tmp38 = tl.load(in_ptr2 + ((-12) + x0), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp39 = tmp11 - tmp38
tmp40 = tmp37 * tmp39
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp30, tmp40, tmp41)
tmp43 = tl.where(tmp20, tmp29, tmp42)
tmp44 = tl.where(tmp9, tmp16, tmp43)
tmp45 = tl.where(tmp4, tmp5, tmp44)
tl.store(out_ptr0 + (x0), tmp45, xmask)
''', device_str='cuda')
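# Note: triton_poi_fused_stack_5 assembles the stacked halting probabilities
# p = [p_1, p_2, p_3, p_4] from the three sigmoids computed above, following
# p_n = un_halted_prob * lambda_n from the forward() loop:
#   p_1 = l1, p_2 = (1 - l1)*l2, p_3 = (1 - l1)*(1 - l2)*l3,
#   p_4 = (1 - l1)*(1 - l2)*(1 - l3)   (final step: lambda = 1)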
# kernel path: runs/run_shard_4/inductor_cache/o4/co4faoo6vn5p4s7le2cjisym3ezxxx6wg265nankhpf5szhdz3rv.py
# Topologically Sorted Source Nodes: [stack_1], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# stack_1 => cat_4
# Graph fragment:
# %cat_4 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%select_5, %select_4, %select_3, %select_6],), kwargs = {})
triton_poi_fused_stack_6 = async_compile.triton('triton_poi_fused_stack_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp5 = tl.load(in_ptr0 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (1))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp25 = tl.load(in_ptr0 + (2))
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp34 = tl.load(in_ptr0 + (3))
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp7 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tmp12 = tl.full([1], 8, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp17 = tl.load(in_ptr1 + (4 + ((-4) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tmp16 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp14, tmp18, tmp19)
tmp21 = tmp0 >= tmp12
tmp22 = tl.full([1], 12, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp27 = tl.load(in_ptr1 + (8 + ((-8) + x0)), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp28 = tmp26 + tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp24, tmp28, tmp29)
tmp31 = tmp0 >= tmp22
tmp32 = tl.full([1], 16, tl.int64)
tmp33 = tmp0 < tmp32
tmp36 = tl.load(in_ptr1 + (12 + ((-12) + x0)), tmp31 & xmask, eviction_policy='evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp31, tmp37, tmp38)
tmp40 = tl.where(tmp24, tmp30, tmp39)
tmp41 = tl.where(tmp14, tmp20, tmp40)
tmp42 = tl.where(tmp4, tmp10, tmp41)
tl.store(out_ptr0 + (x0), tmp42, xmask)
''', device_str='cuda')
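# Note: triton_poi_fused_stack_6 assembles the stacked per-step predictions
# y = [y_1, y_2, y_3, y_4] with y_n = w_out @ h_n + b_out: in_ptr0 holds the
# four broadcast copies of the output bias and in_ptr1 holds rows 0-3 of the
# batched bmm result.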
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12, 4), (4, 1))
assert_size_stride(primals_4, (12, ), (1, ))
assert_size_stride(primals_5, (12, ), (1, ))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1, ), (1, ))
assert_size_stride(primals_8, (1, 4), (4, 1))
assert_size_stride(primals_9, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.new_zeros]
stream0 = get_raw_stream(0)
triton_poi_fused_new_zeros_0.run(buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten.mm]
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf2)
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten._thnn_fused_gru_cell]
buf3 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf2, buf0, primals_4, primals_5)
buf4 = buf3[0]
buf5 = buf3[1]
del buf3
buf6 = empty_strided_cuda((7, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [stack_3], Original ATen: [aten.stack]
triton_poi_fused_stack_1.run(primals_8, primals_6, buf6, 28, grid=grid(28), stream=stream0)
del primals_6
del primals_8
buf7 = empty_strided_cuda((7, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [stack_4], Original ATen: [aten.stack]
triton_poi_fused_stack_2.run(primals_9, primals_7, buf7, 7, grid=grid(7), stream=stream0)
del primals_7
del primals_9
buf8 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [ret_1], Original ATen: [aten.mm]
extern_kernels.mm(buf4, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf8)
# Topologically Sorted Source Nodes: [ret_1], Original ATen: [aten._thnn_fused_gru_cell]
buf9 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf8, buf4, primals_4, primals_5)
buf10 = buf9[0]
buf11 = buf9[1]
del buf9
buf12 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [ret_2], Original ATen: [aten.mm]
extern_kernels.mm(buf10, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf12)
# Topologically Sorted Source Nodes: [ret_2], Original ATen: [aten._thnn_fused_gru_cell]
buf13 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf12, buf10, primals_4, primals_5)
buf14 = buf13[0]
buf15 = buf13[1]
del buf13
buf16 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [ret_3], Original ATen: [aten.mm]
extern_kernels.mm(buf14, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf16)
# Topologically Sorted Source Nodes: [ret_3], Original ATen: [aten._thnn_fused_gru_cell]
buf17 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf16, buf14, primals_4, primals_5)
del buf1
del buf16
del primals_4
del primals_5
buf18 = buf17[0]
buf19 = buf17[1]
del buf17
buf20 = empty_strided_cuda((28, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [stack_2], Original ATen: [aten.stack]
triton_poi_fused_stack_3.run(buf4, buf10, buf14, buf18, buf20, 112, grid=grid(112), stream=stream0)
buf21 = empty_strided_cuda((7, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [baddbmm], Original ATen: [aten.baddbmm]
extern_kernels.bmm(reinterpret_tensor(buf20, (7, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (7, 4, 1), (4, 1, 4), 0), out=buf21)
# Topologically Sorted Source Nodes: [bernoulli_2], Original ATen: [aten.bernoulli]
buf23 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
buf24 = buf23
del buf23
# Topologically Sorted Source Nodes: [bernoulli_1], Original ATen: [aten.bernoulli]
buf27 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
buf28 = buf27
del buf27
# Topologically Sorted Source Nodes: [bernoulli], Original ATen: [aten.bernoulli]
buf31 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
buf32 = buf31
del buf31
# Topologically Sorted Source Nodes: [bernoulli_3], Original ATen: [aten.bernoulli]
buf36 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
buf37 = buf36
del buf36
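# The four rand() buffers above (buf24, buf28, buf32, buf37) feed the fused
# kernel below, which lowers torch.bernoulli(p) to the comparison (rand < p);
# the boolean results (buf25, buf29, buf33, buf38) are also returned from
# call(), presumably so the backward graph can reuse the same coin flips.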
buf38 = empty_strided_cuda((4, ), (1, ), torch.bool)
buf30 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
buf26 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
buf22 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
buf25 = empty_strided_cuda((4, ), (1, ), torch.bool)
buf29 = empty_strided_cuda((4, ), (1, ), torch.bool)
buf33 = empty_strided_cuda((4, ), (1, ), torch.bool)
buf34 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf39 = buf34; del buf34 # reuse
buf35 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf40 = buf35; del buf35 # reuse
# Topologically Sorted Source Nodes: [p_m, sigmoid_2, sub_8, bernoulli_2, sigmoid_1, sub_4, bernoulli_1, sigmoid, un_halted_prob_1, p_n_1, un_halted_prob_2, p_n_2, p_n_3, bernoulli, halted_1, sub_2, mul_3, mul_4, p_m_1, halt_1, sub_6, mul_10, mul_11, p_m_2, halted_2, sub_9, halt_2, sub_10, mul_17, mul_18, p_m_3, halted_3, mul_20, mul_13, mul_6, y_m_1, mul_12, y_m_2, mul_19, y_m_3, lambda_n_3, bernoulli_3, sub_13, halt_3, sub_14, mul_24, mul_25, p_m_4, mul_26, mul_27, y_m_4], Original ATen: [aten.new_zeros, aten.sigmoid, aten.rsub, aten.bernoulli, aten.mul, aten.add, aten.new_ones]
triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4.run(buf39, buf40, buf37, buf7, buf21, buf24, buf28, buf32, buf38, buf30, buf26, buf22, buf25, buf29, buf33, 4, grid=grid(4), stream=stream0)
del buf24
del buf28
del buf32
del buf37
buf41 = reinterpret_tensor(buf18, (16, ), (1, ), 0); del buf18 # reuse
# Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack]
triton_poi_fused_stack_5.run(buf30, buf26, buf22, buf41, 16, grid=grid(16), stream=stream0)
buf42 = empty_strided_cuda((16, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [stack_1], Original ATen: [aten.stack]
triton_poi_fused_stack_6.run(buf7, buf21, buf42, 16, grid=grid(16), stream=stream0)
del buf21
del buf7
return (reinterpret_tensor(buf41, (4, 4), (4, 1), 0), reinterpret_tensor(buf42, (4, 4), (4, 1), 0), buf39, buf40, primals_1, buf0, buf4, buf5, buf10, buf11, buf14, buf15, buf19, buf22, buf25, buf26, buf29, buf30, buf33, buf38, reinterpret_tensor(buf6, (7, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf20, (7, 4, 4), (16, 1, 4), 0), primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
python_code:

from torch.nn import Module
import torch
from torch import nn
from typing import Tuple
import torch.utils.data
import torch.nn.functional
import torch.autograd
class ParityPonderGRU(Module):
"""
## PonderNet with GRU for Parity Task
This is a simple model that uses a [GRU Cell](https://pytorch.org/docs/stable/generated/torch.nn.GRUCell.html)
as the step function.
This model is for the [Parity Task](../parity.html) where the input is a vector of `n_elems`.
Each element of the vector is either `0`, `1` or `-1` and the output is the parity
- a binary value that is true if the number of `1`s is odd and false otherwise.
The prediction of the model is the log probability of the parity being $1$.
"""
def __init__(self, n_elems: 'int', n_hidden: 'int', max_steps: 'int'):
"""
* `n_elems` is the number of elements in the input vector
* `n_hidden` is the state vector size of the GRU
* `max_steps` is the maximum number of steps $N$
"""
super().__init__()
self.max_steps = max_steps
self.n_hidden = n_hidden
self.gru = nn.GRUCell(n_elems, n_hidden)
self.output_layer = nn.Linear(n_hidden, 1)
self.lambda_layer = nn.Linear(n_hidden, 1)
self.lambda_prob = nn.Sigmoid()
self.is_halt = False
def forward(self, x: 'torch.Tensor') -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
* `x` is the input of shape `[batch_size, n_elems]`
This outputs a tuple of four tensors:
1. $p_1 \\dots p_N$ in a tensor of shape `[N, batch_size]`
2. $\\hat{y}_1 \\dots \\hat{y}_N$ in a tensor of shape `[N, batch_size]` - the log probabilities of the parity being $1$
3. $p_m$ of shape `[batch_size]`
4. $\\hat{y}_m$ of shape `[batch_size]` where the computation was halted at step $m$
"""
batch_size = x.shape[0]
h = x.new_zeros((x.shape[0], self.n_hidden))
h = self.gru(x, h)
p = []
y = []
un_halted_prob = h.new_ones((batch_size,))
halted = h.new_zeros((batch_size,))
p_m = h.new_zeros((batch_size,))
y_m = h.new_zeros((batch_size,))
for n in range(1, self.max_steps + 1):
if n == self.max_steps:
lambda_n = h.new_ones(h.shape[0])
else:
lambda_n = self.lambda_prob(self.lambda_layer(h))[:, 0]
y_n = self.output_layer(h)[:, 0]
p_n = un_halted_prob * lambda_n
un_halted_prob = un_halted_prob * (1 - lambda_n)
halt = torch.bernoulli(lambda_n) * (1 - halted)
p.append(p_n)
y.append(y_n)
p_m = p_m * (1 - halt) + p_n * halt
y_m = y_m * (1 - halt) + y_n * halt
halted = halted + halt
h = self.gru(x, h)
if self.is_halt and halted.sum() == batch_size:
break
return torch.stack(p), torch.stack(y), p_m, y_m
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_elems': 4, 'n_hidden': 4, 'max_steps': 4}]
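# Minimal usage sketch (illustrative addition, not part of the dataset row):
# exercises the reference module with the shapes that get_inputs() and
# get_init_inputs() supply; demo_parity_ponder_gru is a hypothetical helper,
# not from the source repo.
def demo_parity_ponder_gru():
    model = ParityPonderGRU(n_elems=4, n_hidden=4, max_steps=4)
    p, y, p_m, y_m = model(torch.rand(4, 4))
    assert p.shape == (4, 4) and y.shape == (4, 4)  # [max_steps, batch_size]
    assert p_m.shape == (4,) and y_m.shape == (4,)  # [batch_size]
    return p, y, p_m, y_m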
triton_code:

import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_stack_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 28
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + x0, tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + x0, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 4, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + x0, tmp19 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 5, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr1 + x0, tmp24 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tmp0 >= tmp22
tmp27 = tl.full([1], 6, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tmp26 & tmp28
tmp30 = tl.load(in_ptr1 + x0, tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp31 = tmp0 >= tmp27
tl.full([1], 7, tl.int64)
tmp34 = tl.load(in_ptr1 + x0, tmp31 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp35 = tl.where(tmp29, tmp30, tmp34)
tmp36 = tl.where(tmp24, tmp25, tmp35)
tmp37 = tl.where(tmp19, tmp20, tmp36)
tmp38 = tl.where(tmp14, tmp15, tmp37)
tmp39 = tl.where(tmp9, tmp10, tmp38)
tmp40 = tl.where(tmp4, tmp5, tmp39)
tl.store(out_ptr0 + x2, tmp40, xmask)
@triton.jit
def triton_poi_fused_stack_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 7
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp5 = tl.load(in_ptr0 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp23 = tl.load(in_ptr1 + 0)
tmp24 = tl.broadcast_to(tmp23, [XBLOCK])
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp7 = tmp0 >= tmp3
tmp8 = tl.full([1], 2, tl.int64)
tmp9 = tmp0 < tmp8
tmp10 = tmp7 & tmp9
tmp11 = tmp0 >= tmp8
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tmp0 >= tmp12
tmp16 = tl.full([1], 4, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tmp15 & tmp17
tmp19 = tmp0 >= tmp16
tmp20 = tl.full([1], 5, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp25 = tmp0 >= tmp20
tmp26 = tl.full([1], 6, tl.int64)
tmp27 = tmp0 < tmp26
tmp28 = tmp25 & tmp27
tl.full([1], 7, tl.int64)
tmp32 = tl.where(tmp28, tmp24, tmp24)
tmp33 = tl.where(tmp22, tmp24, tmp32)
tmp34 = tl.where(tmp18, tmp6, tmp33)
tmp35 = tl.where(tmp14, tmp6, tmp34)
tmp36 = tl.where(tmp10, tmp6, tmp35)
tmp37 = tl.where(tmp4, tmp6, tmp36)
tl.store(out_ptr0 + x0, tmp37, xmask)
@triton.jit
def triton_poi_fused_stack_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 112
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1)), tmp9 & xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-8 + x1)), tmp14 & xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr3 + (x0 + 4 * (-12 + x1)), tmp19 & xmask, other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 20, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr0 + (x0 + 4 * (-16 + x1)), tmp24 & xmask, other=0.0)
tmp26 = tmp0 >= tmp22
tmp27 = tl.full([1], 24, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tmp26 & tmp28
tmp30 = tl.load(in_ptr1 + (x0 + 4 * (-20 + x1)), tmp29 & xmask, other=0.0)
tmp31 = tmp0 >= tmp27
tl.full([1], 28, tl.int64)
tmp34 = tl.load(in_ptr2 + (x0 + 4 * (-24 + x1)), tmp31 & xmask, other=0.0)
tmp35 = tl.where(tmp29, tmp30, tmp34)
tmp36 = tl.where(tmp24, tmp25, tmp35)
tmp37 = tl.where(tmp19, tmp20, tmp36)
tmp38 = tl.where(tmp14, tmp15, tmp37)
tmp39 = tl.where(tmp9, tmp10, tmp38)
tmp40 = tl.where(tmp4, tmp5, tmp39)
tl.store(out_ptr0 + x2, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4(
in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5,
out_ptr6, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + 4)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp5 = tl.load(in_ptr2 + (16 + x0), xmask)
tmp8 = tl.load(in_ptr1 + 5)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp10 = tl.load(in_ptr2 + (20 + x0), xmask)
tmp13 = tl.load(in_ptr1 + 6)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp15 = tl.load(in_ptr2 + (24 + x0), xmask)
tmp18 = tl.load(in_ptr3 + x0, xmask)
tmp20 = tl.load(in_ptr4 + x0, xmask)
tmp22 = tl.load(in_ptr5 + x0, xmask)
tmp59 = tl.load(in_ptr1 + 0)
tmp60 = tl.broadcast_to(tmp59, [XBLOCK])
tmp61 = tl.load(in_ptr2 + x0, xmask)
tmp66 = tl.load(in_ptr1 + 1)
tmp67 = tl.broadcast_to(tmp66, [XBLOCK])
tmp68 = tl.load(in_ptr2 + (4 + x0), xmask)
tmp73 = tl.load(in_ptr1 + 2)
tmp74 = tl.broadcast_to(tmp73, [XBLOCK])
tmp75 = tl.load(in_ptr2 + (8 + x0), xmask)
tmp80 = tl.load(in_ptr1 + 3)
tmp81 = tl.broadcast_to(tmp80, [XBLOCK])
tmp82 = tl.load(in_ptr2 + (12 + x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 < tmp1
tmp6 = tmp4 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp11 = tmp9 + tmp10
tmp12 = tl.sigmoid(tmp11)
tmp16 = tmp14 + tmp15
tmp17 = tl.sigmoid(tmp16)
tmp19 = tmp18 < tmp17
tmp21 = tmp20 < tmp12
tmp23 = tmp22 < tmp7
tmp24 = tmp23.to(tl.float32)
tmp25 = tmp1 - tmp24
tmp26 = 0.0
tmp27 = tmp26 * tmp25
tmp28 = tmp7 * tmp24
tmp29 = tmp27 + tmp28
tmp30 = tmp21.to(tl.float32)
tmp31 = tmp30 * tmp25
tmp32 = tmp1 - tmp31
tmp33 = tmp29 * tmp32
tmp34 = tmp1 - tmp7
tmp35 = tmp34 * tmp12
tmp36 = tmp35 * tmp31
tmp37 = tmp33 + tmp36
tmp38 = tmp19.to(tl.float32)
tmp39 = tmp24 + tmp31
tmp40 = tmp1 - tmp39
tmp41 = tmp38 * tmp40
tmp42 = tmp1 - tmp41
tmp43 = tmp37 * tmp42
tmp44 = tmp1 - tmp12
tmp45 = tmp34 * tmp44
tmp46 = tmp45 * tmp17
tmp47 = tmp46 * tmp41
tmp48 = tmp43 + tmp47
tmp49 = tmp2.to(tl.float32)
tmp50 = tmp39 + tmp41
tmp51 = tmp1 - tmp50
tmp52 = tmp49 * tmp51
tmp53 = tmp1 - tmp52
tmp54 = tmp48 * tmp53
tmp55 = tmp1 - tmp17
tmp56 = tmp45 * tmp55
tmp57 = tmp56 * tmp52
tmp58 = tmp54 + tmp57
tmp62 = tmp60 + tmp61
tmp63 = tmp62 * tmp24
tmp64 = tmp27 + tmp63
tmp65 = tmp64 * tmp32
tmp69 = tmp67 + tmp68
tmp70 = tmp69 * tmp31
tmp71 = tmp65 + tmp70
tmp72 = tmp71 * tmp42
tmp76 = tmp74 + tmp75
tmp77 = tmp76 * tmp41
tmp78 = tmp72 + tmp77
tmp79 = tmp78 * tmp53
tmp83 = tmp81 + tmp82
tmp84 = tmp83 * tmp52
tmp85 = tmp79 + tmp84
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp7, xmask)
tl.store(out_ptr2 + x0, tmp12, xmask)
tl.store(out_ptr3 + x0, tmp17, xmask)
tl.store(out_ptr4 + x0, tmp19, xmask)
tl.store(out_ptr5 + x0, tmp21, xmask)
tl.store(out_ptr6 + x0, tmp23, xmask)
tl.store(in_out_ptr0 + x0, tmp58, xmask)
tl.store(in_out_ptr1 + x0, tmp85, xmask)
@triton.jit
def triton_poi_fused_stack_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (-4 + x0), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = 1.0
tmp12 = tmp11 - tmp10
tmp13 = tl.load(in_ptr1 + (-4 + x0), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp14 = tmp12 * tmp13
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp9, tmp14, tmp15)
tmp17 = tmp0 >= tmp7
tmp18 = tl.full([1], 12, tl.int64)
tmp19 = tmp0 < tmp18
tmp20 = tmp17 & tmp19
tmp21 = tl.load(in_ptr0 + (-8 + x0), tmp20 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp22 = tmp11 - tmp21
tmp23 = tl.load(in_ptr1 + (-8 + x0), tmp20 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp24 = tmp11 - tmp23
tmp25 = tmp22 * tmp24
tmp26 = tl.load(in_ptr2 + (-8 + x0), tmp20 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp27 = tmp25 * tmp26
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp20, tmp27, tmp28)
tmp30 = tmp0 >= tmp18
tl.full([1], 16, tl.int64)
tmp33 = tl.load(in_ptr0 + (-12 + x0), tmp30 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp34 = tmp11 - tmp33
tmp35 = tl.load(in_ptr1 + (-12 + x0), tmp30 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp36 = tmp11 - tmp35
tmp37 = tmp34 * tmp36
tmp38 = tl.load(in_ptr2 + (-12 + x0), tmp30 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp39 = tmp11 - tmp38
tmp40 = tmp37 * tmp39
tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype)
tmp42 = tl.where(tmp30, tmp40, tmp41)
tmp43 = tl.where(tmp20, tmp29, tmp42)
tmp44 = tl.where(tmp9, tmp16, tmp43)
tmp45 = tl.where(tmp4, tmp5, tmp44)
tl.store(out_ptr0 + x0, tmp45, xmask)
@triton.jit
def triton_poi_fused_stack_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
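    # Flattened torch.stack(y): segment n adds the broadcast scalar bias
    # in_ptr0[n] to the matching 4-element slice of the projected hidden
    # states in in_ptr1, i.e. y_n = output_layer(h_n).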
tmp5 = tl.load(in_ptr0 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp15 = tl.load(in_ptr0 + 1)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp25 = tl.load(in_ptr0 + 2)
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp34 = tl.load(in_ptr0 + 3)
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp7 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tmp12 = tl.full([1], 8, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp17 = tl.load(in_ptr1 + (4 + (-4 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp18 = tmp16 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp14, tmp18, tmp19)
tmp21 = tmp0 >= tmp12
tmp22 = tl.full([1], 12, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp27 = tl.load(in_ptr1 + (8 + (-8 + x0)), tmp24 & xmask,
eviction_policy='evict_last', other=0.0)
tmp28 = tmp26 + tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp24, tmp28, tmp29)
tmp31 = tmp0 >= tmp22
tl.full([1], 16, tl.int64)
tmp36 = tl.load(in_ptr1 + (12 + (-12 + x0)), tmp31 & xmask,
eviction_policy='evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp31, tmp37, tmp38)
tmp40 = tl.where(tmp24, tmp30, tmp39)
tmp41 = tl.where(tmp14, tmp20, tmp40)
tmp42 = tl.where(tmp4, tmp10, tmp41)
tl.store(out_ptr0 + x0, tmp42, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12, 4), (4, 1))
assert_size_stride(primals_4, (12,), (1,))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (1, 4), (4, 1))
assert_size_stride(primals_9, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_new_zeros_0[grid(16)](buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 12),
(1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (4, 12), (1,
4), 0), out=buf2)
buf3 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf2, buf0,
primals_4, primals_5)
buf4 = buf3[0]
buf5 = buf3[1]
del buf3
buf6 = empty_strided_cuda((7, 4), (4, 1), torch.float32)
triton_poi_fused_stack_1[grid(28)](primals_8, primals_6, buf6, 28,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_6
del primals_8
buf7 = empty_strided_cuda((7,), (1,), torch.float32)
triton_poi_fused_stack_2[grid(7)](primals_9, primals_7, buf7, 7,
XBLOCK=8, num_warps=1, num_stages=1)
del primals_7
del primals_9
buf8 = buf2
del buf2
extern_kernels.mm(buf4, reinterpret_tensor(primals_3, (4, 12), (1,
4), 0), out=buf8)
buf9 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf8, buf4,
primals_4, primals_5)
buf10 = buf9[0]
buf11 = buf9[1]
del buf9
buf12 = buf8
del buf8
extern_kernels.mm(buf10, reinterpret_tensor(primals_3, (4, 12), (1,
4), 0), out=buf12)
buf13 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf12,
buf10, primals_4, primals_5)
buf14 = buf13[0]
buf15 = buf13[1]
del buf13
buf16 = buf12
del buf12
extern_kernels.mm(buf14, reinterpret_tensor(primals_3, (4, 12), (1,
4), 0), out=buf16)
buf17 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf16,
buf14, primals_4, primals_5)
del buf1
del buf16
del primals_4
del primals_5
buf18 = buf17[0]
buf19 = buf17[1]
del buf17
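        # max_steps = 4, so the ponder loop is fully unrolled: buf3, buf9,
        # buf13 and buf17 are four fused GRU-cell calls producing h_1..h_4,
        # all reusing buf1 = x @ W_ih^T; the eager loop's final, unused GRU
        # step appears to have been dead-code-eliminated.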
buf20 = empty_strided_cuda((28, 4), (4, 1), torch.float32)
triton_poi_fused_stack_3[grid(112)](buf4, buf10, buf14, buf18,
buf20, 112, XBLOCK=128, num_warps=4, num_stages=1)
buf21 = empty_strided_cuda((7, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf20, (7, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf6, (7, 4, 1), (4, 1, 4), 0), out=buf21)
buf23 = torch.ops.aten.rand.default([4], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf24 = buf23
del buf23
buf27 = torch.ops.aten.rand.default([4], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf28 = buf27
del buf27
buf31 = torch.ops.aten.rand.default([4], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf32 = buf31
del buf31
buf36 = torch.ops.aten.rand.default([4], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf37 = buf36
del buf36
buf38 = empty_strided_cuda((4,), (1,), torch.bool)
buf30 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
buf26 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
buf22 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
buf25 = empty_strided_cuda((4,), (1,), torch.bool)
buf29 = empty_strided_cuda((4,), (1,), torch.bool)
buf33 = empty_strided_cuda((4,), (1,), torch.bool)
buf34 = empty_strided_cuda((4,), (1,), torch.float32)
buf39 = buf34
del buf34
buf35 = empty_strided_cuda((4,), (1,), torch.float32)
buf40 = buf35
del buf35
triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4[
grid(4)](buf39, buf40, buf37, buf7, buf21, buf24, buf28, buf32,
buf38, buf30, buf26, buf22, buf25, buf29, buf33, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del buf24
del buf28
del buf32
del buf37
buf41 = reinterpret_tensor(buf18, (16,), (1,), 0)
del buf18
triton_poi_fused_stack_5[grid(16)](buf30, buf26, buf22, buf41, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf42 = empty_strided_cuda((16,), (1,), torch.float32)
triton_poi_fused_stack_6[grid(16)](buf7, buf21, buf42, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del buf21
del buf7
return (reinterpret_tensor(buf41, (4, 4), (4, 1), 0),
reinterpret_tensor(buf42, (4, 4), (4, 1), 0), buf39, buf40,
primals_1, buf0, buf4, buf5, buf10, buf11, buf14, buf15, buf19,
buf22, buf25, buf26, buf29, buf30, buf33, buf38, reinterpret_tensor
(buf6, (7, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf20, (7, 4, 4
), (16, 1, 4), 0), primals_3)
class ParityPonderGRUNew(Module):
"""
## PonderNet with GRU for Parity Task
This is a simple model that uses a [GRU Cell](https://pytorch.org/docs/stable/generated/torch.nn.GRUCell.html)
as the step function.
This model is for the [Parity Task](../parity.html) where the input is a vector of `n_elems`.
Each element of the vector is either `0`, `1` or `-1` and the output is the parity
- a binary value that is true if the number of `1`s is odd and false otherwise.
The prediction of the model is the log probability of the parity being $1$.
"""
def __init__(self, n_elems: 'int', n_hidden: 'int', max_steps: 'int'):
"""
* `n_elems` is the number of elements in the input vector
* `n_hidden` is the state vector size of the GRU
* `max_steps` is the maximum number of steps $N$
"""
super().__init__()
self.max_steps = max_steps
self.n_hidden = n_hidden
self.gru = nn.GRUCell(n_elems, n_hidden)
self.output_layer = nn.Linear(n_hidden, 1)
self.lambda_layer = nn.Linear(n_hidden, 1)
self.lambda_prob = nn.Sigmoid()
self.is_halt = False
def forward(self, input_0):
primals_2 = self.gru.weight_ih
primals_3 = self.gru.weight_hh
primals_4 = self.gru.bias_ih
primals_5 = self.gru.bias_hh
primals_6 = self.output_layer.weight
primals_7 = self.output_layer.bias
primals_8 = self.lambda_layer.weight
primals_9 = self.lambda_layer.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1], output[2], output[3]
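

# Hedged usage sketch (not part of the generated source; shapes match the
# get_inputs/get_init_inputs harness elsewhere in this record):
# model = ParityPonderGRUNew(n_elems=4, n_hidden=4, max_steps=4).cuda()
# p, y, p_m, y_m = model(torch.rand(4, 4, device='cuda'))
# p and y are (4, 4) views of the stacked per-step tensors; p_m, y_m are (4,).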
| mcx/annotated_deep_learning_paper_implementations | ParityPonderGRU | false | 7233 | [
"MIT"
] | 1 | f169f3a71dd2d36eb28ad31062d3475efa367b88 | https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88 | from torch.nn import Module
import torch
from torch import nn
from typing import Tuple
import torch.utils.data
import torch.nn.functional
import torch.autograd
class Model(Module):
"""
## PonderNet with GRU for Parity Task
This is a simple model that uses a [GRU Cell](https://pytorch.org/docs/stable/generated/torch.nn.GRUCell.html)
as the step function.
This model is for the [Parity Task](../parity.html) where the input is a vector of `n_elems`.
Each element of the vector is either `0`, `1` or `-1` and the output is the parity
- a binary value that is true if the number of `1`s is odd and false otherwise.
The prediction of the model is the log probability of the parity being $1$.
"""
def __init__(self, n_elems: 'int', n_hidden: 'int', max_steps: 'int'):
"""
* `n_elems` is the number of elements in the input vector
* `n_hidden` is the state vector size of the GRU
* `max_steps` is the maximum number of steps $N$
"""
super().__init__()
self.max_steps = max_steps
self.n_hidden = n_hidden
self.gru = nn.GRUCell(n_elems, n_hidden)
self.output_layer = nn.Linear(n_hidden, 1)
self.lambda_layer = nn.Linear(n_hidden, 1)
self.lambda_prob = nn.Sigmoid()
self.is_halt = False
def forward(self, x: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor,
torch.Tensor, torch.Tensor]:
"""
* `x` is the input of shape `[batch_size, n_elems]`
This outputs a tuple of four tensors:
1. $p_1 \\dots p_N$ in a tensor of shape `[N, batch_size]`
2. $\\hat{y}_1 \\dots \\hat{y}_N$ in a tensor of shape `[N, batch_size]` - the log probabilities of the parity being $1$
3. $p_m$ of shape `[batch_size]`
4. $\\hat{y}_m$ of shape `[batch_size]` where the computation was halted at step $m$
"""
batch_size = x.shape[0]
h = x.new_zeros((x.shape[0], self.n_hidden))
h = self.gru(x, h)
p = []
y = []
un_halted_prob = h.new_ones((batch_size,))
halted = h.new_zeros((batch_size,))
p_m = h.new_zeros((batch_size,))
y_m = h.new_zeros((batch_size,))
for n in range(1, self.max_steps + 1):
if n == self.max_steps:
lambda_n = h.new_ones(h.shape[0])
else:
lambda_n = self.lambda_prob(self.lambda_layer(h))[:, 0]
y_n = self.output_layer(h)[:, 0]
p_n = un_halted_prob * lambda_n
un_halted_prob = un_halted_prob * (1 - lambda_n)
halt = torch.bernoulli(lambda_n) * (1 - halted)
p.append(p_n)
y.append(y_n)
p_m = p_m * (1 - halt) + p_n * halt
y_m = y_m * (1 - halt) + y_n * halt
halted = halted + halt
h = self.gru(x, h)
if self.is_halt and halted.sum() == batch_size:
break
return torch.stack(p), torch.stack(y), p_m, y_m
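

# Worked example of the halting distribution built above: with a constant
# lambda_n = 0.5 and max_steps = 3, p = [0.5, 0.25, 0.25], since
# p_n = lambda_n * prod_{i<n}(1 - lambda_i) and the forced lambda_N = 1
# assigns all remaining probability to the final step, so sum(p) = 1.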
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
equalized_linear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/2i/c2if2yhrux7a2hf5x6cptfa3iexq65bs7d3zjjyatstrnnog3cdz.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.5), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
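# The constant 0.5 folded into the kernel above is this module's equalized-lr
# scale: with the defaults a = 1.0 and c_in = 4, gain = (2 / (1 + 1.0**2))**0.5
# = 1.0 and scale = gain / 4**0.5 = 0.5.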
# kernel path: runs/run_shard_4/inductor_cache/t6/ct6f57cdvyh3ahq6iwyawuy7577bar2ftumjxqllolmn4c4lh7ph.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_1 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %expand), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn.init import normal
import torch.utils.data
def _calculate_fan_in_and_fan_out(tensor):
dimensions = tensor.ndimension()
if dimensions < 2:
raise ValueError(
'Fan in and fan out can not be computed for tensor with less than 2 dimensions'
)
if dimensions == 2:
fan_in = tensor.size(1)
fan_out = tensor.size(0)
else:
num_input_fmaps = tensor.size(1)
num_output_fmaps = tensor.size(0)
receptive_field_size = 1
if tensor.dim() > 2:
receptive_field_size = tensor[0][0].numel()
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
return fan_in, fan_out
class equalized_linear(nn.Module):
def __init__(self, c_in, c_out, initializer='kaiming', a=1.0, reshape=False
):
super(equalized_linear, self).__init__()
self.linear = nn.Linear(c_in, c_out, bias=False)
if initializer == 'kaiming':
normal(self.linear.weight)
fan_in, _ = _calculate_fan_in_and_fan_out(self.linear.weight)
gain = (2.0 / (1.0 + a ** 2)) ** 0.5
self.scale = gain / fan_in ** 0.5
if reshape:
            c_out //= 4 * 4  # integer division: FloatTensor needs an int size
self.bias = torch.nn.Parameter(torch.FloatTensor(c_out).fill_(0))
self.reshape = reshape
def forward(self, x):
x = self.linear(x.mul(self.scale))
if self.reshape:
x = x.view(-1, 512, 4, 4)
x = x + self.bias.view(1, -1, 1, 1).expand_as(x)
else:
x = x + self.bias.view(1, -1).expand_as(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'c_in': 4, 'c_out': 4}]
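# Hedged usage sketch (shapes taken from get_inputs/get_init_inputs above):
# layer = equalized_linear(4, 4)
# y = layer(torch.rand(4, 4, 4, 4))  # scaled matmul + broadcast bias -> [4, 4, 4, 4]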
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn.init import normal
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_add_1[grid(256)](buf2, primals_3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_3
return buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
def _calculate_fan_in_and_fan_out(tensor):
dimensions = tensor.ndimension()
if dimensions < 2:
raise ValueError(
'Fan in and fan out can not be computed for tensor with less than 2 dimensions'
)
if dimensions == 2:
fan_in = tensor.size(1)
fan_out = tensor.size(0)
else:
num_input_fmaps = tensor.size(1)
num_output_fmaps = tensor.size(0)
receptive_field_size = 1
if tensor.dim() > 2:
receptive_field_size = tensor[0][0].numel()
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
return fan_in, fan_out
class equalized_linearNew(nn.Module):
def __init__(self, c_in, c_out, initializer='kaiming', a=1.0, reshape=False
):
super(equalized_linearNew, self).__init__()
self.linear = nn.Linear(c_in, c_out, bias=False)
if initializer == 'kaiming':
normal(self.linear.weight)
fan_in, _ = _calculate_fan_in_and_fan_out(self.linear.weight)
gain = (2.0 / (1.0 + a ** 2)) ** 0.5
self.scale = gain / fan_in ** 0.5
if reshape:
            c_out //= 4 * 4  # integer division: FloatTensor needs an int size
self.bias = torch.nn.Parameter(torch.FloatTensor(c_out).fill_(0))
self.reshape = reshape
def forward(self, input_0):
primals_3 = self.bias
primals_2 = self.linear.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| mingo-x/pggan-pytorch | equalized_linear | false | 7234 | [
"MIT"
] | 1 | a1dde73cd4df52476fe7c948d81fa9caea8070a5 | https://github.com/mingo-x/pggan-pytorch/tree/a1dde73cd4df52476fe7c948d81fa9caea8070a5 | import torch
import torch.nn as nn
from torch.nn.init import normal
import torch.utils.data
def _calculate_fan_in_and_fan_out(tensor):
dimensions = tensor.ndimension()
if dimensions < 2:
raise ValueError(
'Fan in and fan out can not be computed for tensor with less than 2 dimensions'
)
if dimensions == 2:
fan_in = tensor.size(1)
fan_out = tensor.size(0)
else:
num_input_fmaps = tensor.size(1)
num_output_fmaps = tensor.size(0)
receptive_field_size = 1
if tensor.dim() > 2:
receptive_field_size = tensor[0][0].numel()
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
return fan_in, fan_out
class Model(nn.Module):
def __init__(self, c_in, c_out, initializer='kaiming', a=1.0, reshape=False
):
super().__init__()
self.linear = nn.Linear(c_in, c_out, bias=False)
if initializer == 'kaiming':
normal(self.linear.weight)
fan_in, _ = _calculate_fan_in_and_fan_out(self.linear.weight)
gain = (2.0 / (1.0 + a ** 2)) ** 0.5
self.scale = gain / fan_in ** 0.5
if reshape:
            c_out //= 4 * 4  # integer division: FloatTensor needs an int size
self.bias = torch.nn.Parameter(torch.FloatTensor(c_out).fill_(0))
self.reshape = reshape
def forward(self, x):
x = self.linear(x.mul(self.scale))
if self.reshape:
x = x.view(-1, 512, 4, 4)
x = x + self.bias.view(1, -1, 1, 1).expand_as(x)
else:
x = x + self.bias.view(1, -1).expand_as(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ConvBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/v6/cv6oewqqnsshd7he7ylh2kikzu4smtrhj2dmv6nb5csosp7g6vw5.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# out => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7r/c7rmcz7d66c7acqsst3ljub72usieb7gow6csu7nmp55tklmjx2e.py
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.elu]
# Source node to ATen node mapping:
# out_1 => convolution
# out_2 => expm1, gt, mul, mul_2, where
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {})
triton_poi_fused_convolution_elu_1 = async_compile.triton('triton_poi_fused_convolution_elu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0
tmp6 = tmp2 * tmp5
tmp7 = libdevice.expm1(tmp6)
tmp8 = tmp7 * tmp5
tmp9 = tl.where(tmp4, tmp6, tmp8)
tl.store(in_out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.elu]
triton_poi_fused_convolution_elu_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf2, primals_2, buf0, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Conv3x3(nn.Module):
"""Layer to pad and convolve input
"""
def __init__(self, in_channels, out_channels, use_refl=True):
super(Conv3x3, self).__init__()
if use_refl:
self.pad = nn.ReflectionPad2d(1)
else:
self.pad = nn.ZeroPad2d(1)
self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)
def forward(self, x):
out = self.pad(x)
out = self.conv(out)
return out
class ConvBlock(nn.Module):
"""Layer to perform a convolution followed by ELU
"""
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv = Conv3x3(in_channels, out_channels)
self.nonlin = nn.ELU(inplace=True)
def forward(self, x):
out = self.conv(x)
out = self.nonlin(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
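    # ReflectionPad2d(1) on a 4x4 map: abs(-3 + abs(-1 + x0)) folds each padded
    # output coordinate back to its mirrored source offset, so
    # 15 - col_off - 4 * row_off + 16 * x2 addresses element
    # (3 - row_off, 3 - col_off) of channel x2.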
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
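    # Bias add followed by ELU with alpha = 1.0: where(x > 0, x, expm1(x)).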
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 1.0
tmp6 = tmp2 * tmp5
tmp7 = libdevice.expm1(tmp6)
tmp8 = tmp7 * tmp5
tmp9 = tl.where(tmp4, tmp6, tmp8)
tl.store(in_out_ptr0 + x3, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_elu_1[grid(256)](buf2, primals_3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0, buf2
class Conv3x3(nn.Module):
"""Layer to pad and convolve input
"""
def __init__(self, in_channels, out_channels, use_refl=True):
super(Conv3x3, self).__init__()
if use_refl:
self.pad = nn.ReflectionPad2d(1)
else:
self.pad = nn.ZeroPad2d(1)
self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)
def forward(self, x):
out = self.pad(x)
out = self.conv(out)
return out
class ConvBlockNew(nn.Module):
"""Layer to perform a convolution followed by ELU
"""
def __init__(self, in_channels, out_channels):
super(ConvBlockNew, self).__init__()
self.conv = Conv3x3(in_channels, out_channels)
self.nonlin = nn.ELU(inplace=True)
def forward(self, input_0):
primals_2 = self.conv.conv.weight
primals_3 = self.conv.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| minjabenho/image2pcl | ConvBlock | false | 7235 | [
"Apache-2.0"
] | 1 | 7e696ee48edae30814d32f32e605ad6cf8bf702c | https://github.com/minjabenho/image2pcl/tree/7e696ee48edae30814d32f32e605ad6cf8bf702c | import torch
import torch.nn as nn
class Conv3x3(nn.Module):
"""Layer to pad and convolve input
"""
def __init__(self, in_channels, out_channels, use_refl=True):
super().__init__()
if use_refl:
self.pad = nn.ReflectionPad2d(1)
else:
self.pad = nn.ZeroPad2d(1)
self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)
def forward(self, x):
out = self.pad(x)
out = self.conv(out)
return out
class Model(nn.Module):
"""Layer to perform a convolution followed by ELU
"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv = Conv3x3(in_channels, out_channels)
self.nonlin = nn.ELU(inplace=True)
def forward(self, x):
out = self.conv(x)
out = self.nonlin(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
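# Hedged usage sketch: reflection padding of 1 plus a 3x3 convolution keeps the
# spatial size, so Model(4, 4)(torch.rand(4, 4, 4, 4)) returns shape [4, 4, 4, 4].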
|
Project3D | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/32/c32ppse2vdmak5is2nuwq2vbmvddtxyrdxkeqrxyec7bhptha7aa.py
# Topologically Sorted Source Nodes: [cam_points], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# cam_points => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 48
x1 = (xindex // 48)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/p3/cp33b3l57w5jlxxjkbyxuiiw6erdw3sxbh2kc6ydjd74u3ubmxdx.py
# Topologically Sorted Source Nodes: [sub, pix_coords_3], Original ATen: [aten.sub, aten.mul]
# Source node to ATen node mapping:
# pix_coords_3 => mul
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute_16, 0.5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 2), kwargs = {})
triton_poi_fused_mul_sub_1 = async_compile.triton('triton_poi_fused_mul_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 2
x0 = xindex % 16
x2 = (xindex // 32)
x3 = xindex % 32
x4 = xindex
tmp7 = tl.load(in_ptr0 + (x0 + (48*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + (48*x2)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (16 + x0 + (48*x2)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (x3 + (48*x2)), xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tmp1 == tmp1
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = tmp1 == tmp4
tmp6 = tmp4 == tmp4
tmp9 = 1e-07
tmp10 = tmp8 + tmp9
tmp11 = tmp7 / tmp10
tmp12 = 0.3333333333333333
tmp13 = tmp11 * tmp12
tmp14 = tl.where(tmp6, tmp13, tmp11)
tmp16 = tmp15 / tmp10
tmp17 = tl.where(tmp5, tmp13, tmp16)
tmp18 = tl.where(tmp5, tmp14, tmp17)
tmp19 = tmp18 * tmp12
tmp20 = tl.where(tmp3, tmp19, tmp18)
tmp21 = tmp0 == tmp4
tmp23 = tmp22 / tmp10
tmp24 = tl.where(tmp21, tmp13, tmp23)
tmp25 = tl.where(tmp21, tmp14, tmp24)
tmp26 = tl.where(tmp2, tmp19, tmp25)
tmp27 = tl.where(tmp2, tmp20, tmp26)
tmp28 = 0.5
tmp29 = tmp27 - tmp28
tmp30 = 2.0
tmp31 = tmp29 * tmp30
tl.store(out_ptr0 + (x4), tmp31, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 3, 4, 4), (48, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cam_points], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, buf1, 192, grid=grid(192), stream=stream0)
del buf0
buf2 = empty_strided_cuda((12, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cam_points], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (12, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (12, 4, 4), (16, 4, 1), 0), out=buf2)
del arg2_1
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [sub, pix_coords_3], Original ATen: [aten.sub, aten.mul]
triton_poi_fused_mul_sub_1.run(buf2, buf3, 128, grid=grid(128), stream=stream0)
del buf2
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Project3D(nn.Module):
"""Layer which projects 3D points into a camera with intrinsics K and at position T
"""
def __init__(self, batch_size, height, width, eps=1e-07):
super(Project3D, self).__init__()
self.batch_size = batch_size
self.height = height
self.width = width
self.eps = eps
def forward(self, points, K, T):
P = torch.matmul(K, T)[:, :3, :]
cam_points = torch.matmul(P, points)
pix_coords = cam_points[:, :2, :] / (cam_points[:, 2, :].unsqueeze(
1) + self.eps)
pix_coords = pix_coords.view(self.batch_size, 2, self.height, self.
width)
pix_coords = pix_coords.permute(0, 2, 3, 1)
pix_coords[..., 0] /= self.width - 1
pix_coords[..., 1] /= self.height - 1
pix_coords = (pix_coords - 0.5) * 2
return pix_coords
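

# Worked example of the final normalization for width = height = 4: dividing by
# (4 - 1) maps pixel coordinates into [0, 1] (the 1/3 constant folded into the
# fused Triton kernel), and (x - 0.5) * 2 rescales to [-1, 1], the range
# torch.nn.functional.grid_sample expects.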
def get_inputs():
return [torch.rand([4, 3, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'batch_size': 4, 'height': 4, 'width': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 48
x1 = xindex // 48
x2 = xindex
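    # Materializes P = torch.matmul(K, T)[:, :3, :]: each batch keeps the
    # first 48 of its 64 elements, i.e. the slice along dim 1.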
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_mul_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 2
x0 = xindex % 16
x2 = xindex // 32
x3 = xindex % 32
x4 = xindex
tmp7 = tl.load(in_ptr0 + (x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (16 + x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp22 = tl.load(in_ptr0 + (x3 + 48 * x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tmp1 == tmp1
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = tmp1 == tmp4
tmp6 = tmp4 == tmp4
tmp9 = 1e-07
tmp10 = tmp8 + tmp9
tmp11 = tmp7 / tmp10
tmp12 = 0.3333333333333333
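    # 0.3333... = 1 / (width - 1) = 1 / (height - 1) for the 4x4 case: the
    # in-place `/=` normalizations from forward() are constant-folded here.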
tmp13 = tmp11 * tmp12
tmp14 = tl.where(tmp6, tmp13, tmp11)
tmp16 = tmp15 / tmp10
tmp17 = tl.where(tmp5, tmp13, tmp16)
tmp18 = tl.where(tmp5, tmp14, tmp17)
tmp19 = tmp18 * tmp12
tmp20 = tl.where(tmp3, tmp19, tmp18)
tmp21 = tmp0 == tmp4
tmp23 = tmp22 / tmp10
tmp24 = tl.where(tmp21, tmp13, tmp23)
tmp25 = tl.where(tmp21, tmp14, tmp24)
tmp26 = tl.where(tmp2, tmp19, tmp25)
tmp27 = tl.where(tmp2, tmp20, tmp26)
tmp28 = 0.5
tmp29 = tmp27 - tmp28
tmp30 = 2.0
tmp31 = tmp29 * tmp30
tl.store(out_ptr0 + x4, tmp31, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 3, 4, 4), (48, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0),
out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(192)](buf0, buf1, 192, XBLOCK=128,
num_warps=4, num_stages=1)
del buf0
buf2 = empty_strided_cuda((12, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (12, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg2_1, (12, 4, 4), (16, 4, 1), 0), out=buf2
)
del arg2_1
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 4, 1, 16), torch.float32)
triton_poi_fused_mul_sub_1[grid(128)](buf2, buf3, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del buf2
return buf3,
class Project3DNew(nn.Module):
"""Layer which projects 3D points into a camera with intrinsics K and at position T
"""
def __init__(self, batch_size, height, width, eps=1e-07):
super(Project3DNew, self).__init__()
self.batch_size = batch_size
self.height = height
self.width = width
self.eps = eps
def forward(self, input_0, input_1, input_2):
arg2_1 = input_0
arg0_1 = input_1
arg1_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| minjabenho/image2pcl | Project3D | false | 7236 | [
"Apache-2.0"
] | 1 | 7e696ee48edae30814d32f32e605ad6cf8bf702c | https://github.com/minjabenho/image2pcl/tree/7e696ee48edae30814d32f32e605ad6cf8bf702c | import torch
import torch.nn as nn
class Model(nn.Module):
"""Layer which projects 3D points into a camera with intrinsics K and at position T
"""
def __init__(self, batch_size, height, width, eps=1e-07):
super().__init__()
self.batch_size = batch_size
self.height = height
self.width = width
self.eps = eps
def forward(self, points, K, T):
P = torch.matmul(K, T)[:, :3, :]
cam_points = torch.matmul(P, points)
pix_coords = cam_points[:, :2, :] / (cam_points[:, 2, :].unsqueeze(
1) + self.eps)
pix_coords = pix_coords.view(self.batch_size, 2, self.height, self.
width)
pix_coords = pix_coords.permute(0, 2, 3, 1)
pix_coords[..., 0] /= self.width - 1
pix_coords[..., 1] /= self.height - 1
pix_coords = (pix_coords - 0.5) * 2
return pix_coords
def get_inputs():
return [torch.rand([4, 3, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
SelfAttnLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3l/c3lu4ccbjruychszpewk67ythz75gaj4rslgmbux6fatrywe7g7t.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.add]
# Source node to ATen node mapping:
# multi_head_attention_forward => add_2
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %getitem_5), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ol/colbiyeeegfdyyzeckjnylgg3xt3rkh3aadcz7fjtfx5472nedsg.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_4, 1.0), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/c5/cc5cm2utkmzhcjdhw5qgs7t254ixwfil74kthoebdprvhlljdmul.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.add]
# Source node to ATen node mapping:
# multi_head_attention_forward => add_1
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %getitem_4), kwargs = {})
triton_poi_fused_add_2 = async_compile.triton('triton_poi_fused_add_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7s/c7spagnqvsgjrukyw5jujzjmswxuigeuvpyhxgdob766q2gfvgzr.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dw/cdwqsjnh2osfmjr2utzzaqdg2vrfivzkuhareq3urgidllj2bsvr.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
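# Together, _softmax_3 and _softmax_4 implement a numerically stable softmax
# over the last dimension: the first kernel subtracts the per-row max before
# exponentiating, the second divides by the per-row sum. A minimal eager-mode
# sketch of the same computation (illustrative only, not part of the
# generated module):
#
#   def stable_softmax(x, dim=-1):
#       shifted = x - x.amax(dim=dim, keepdim=True)  # guards exp() overflow
#       e = shifted.exp()
#       return e / e.sum(dim=dim, keepdim=True)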
# kernel path: runs/run_shard_4/inductor_cache/y5/cy5gjrtl7netbzcjhig66pdorub2vbq2qvwmv3tamld2ehimmlz7.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_8,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vw/cvwoulenwdnyz242jfpxeidrk7o72lp64ezs7a4qyr6jzjnmz5zv.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# multi_head_attention_forward => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view_11, [1]), kwargs = {})
triton_poi_fused_mean_6 = async_compile.triton('triton_poi_fused_mean_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/h5/ch5mg4cp4hiu3m2725cfdtrg2qzbpfbabkhvt3p4ujimtbtffjtu.py
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# src => add_3
# src_1 => clone_2
# Graph fragment:
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute, %squeeze), kwargs = {})
# %clone_2 : [num_users=3] = call_function[target=torch.ops.aten.clone.default](args = (%add_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_out_ptr0, in_ptr0, in_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tmp1 = tl.load(in_out_ptr0 + (x1 + (4*y0)), xmask & ymask)
tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x1 + (4*y0)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/5m/c5m2x4kwr66u6jzlkjcacrwhzqxhxsn3hv6ryzwol7bzp7uppnze.py
# Topologically Sorted Source Nodes: [src_1], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# src_1 => add_4, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone_2, [1]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
triton_poi_fused_native_layer_norm_8 = async_compile.triton('triton_poi_fused_native_layer_norm_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
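# This kernel produces the per-row LayerNorm statistics for a (4, 4) input:
# the mean over the last dimension and rsqrt(var + eps) with eps = 1e-05 and
# biased variance (correction=0). Eager equivalent (illustrative sketch):
#
#   mean = x.mean(dim=-1, keepdim=True)
#   rstd = torch.rsqrt(x.var(dim=-1, unbiased=False, keepdim=True) + 1e-05)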
# kernel path: runs/run_shard_4/inductor_cache/pg/cpgskb56mehof5k52uslszbldka4jbq52y6dhbe764xtjdj3lwxc.py
# Topologically Sorted Source Nodes: [src_1], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# src_1 => add_4, add_5, mul_1, mul_2, rsqrt, sub_1, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone_2, [1]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone_2, %getitem_7), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_6), kwargs = {})
# %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_7), kwargs = {})
triton_poi_fused_native_layer_norm_9 = async_compile.triton('triton_poi_fused_native_layer_norm_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
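# native_layer_norm_9 then applies the affine part of LayerNorm using the
# statistics computed above: out = (x - mean) * rstd * weight + bias.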
# kernel path: runs/run_shard_4/inductor_cache/xf/cxfcg7zjujrbwqervynu2zyrvp55bvbh5d5sr7rb7uygjdwkyhbn.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_9), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_10 = async_compile.triton('triton_poi_fused_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
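# relu_10 fuses the linear1 bias add with the activation, i.e.
# out = max(0, mm_result + bias), matching relu(linear1(src)) in the eager
# module (the surrounding dropout does not appear in the traced graph).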
# kernel path: runs/run_shard_4/inductor_cache/eo/ceoth654lollb7zomdik5dia43d44y676jiemrjhuhkiqi2yqxq7.py
# Topologically Sorted Source Nodes: [src_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# src_2 => add_6
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {})
# %add_6 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %add_tensor), kwargs = {})
triton_poi_fused_add_11 = async_compile.triton('triton_poi_fused_add_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), out=buf2)
del primals_2
buf3 = reinterpret_tensor(buf2, (4, 1, 4), (4, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(buf3, primals_3, 16, grid=grid(16), stream=stream0)
buf4 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(buf4, primals_3, 16, grid=grid(16), stream=stream0)
buf5 = reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.add]
triton_poi_fused_add_2.run(buf5, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf4, reinterpret_tensor(buf5, (4, 1, 4), (1, 0, 4), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf6, buf7, 64, grid=grid(64), stream=stream0)
buf8 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf7, buf8, 64, grid=grid(64), stream=stream0)
del buf7
buf9 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf8, reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf9, buf10, 4, 4, grid=grid(4, 4), stream=stream0)
buf11 = reinterpret_tensor(buf9, (4, 4), (4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf10, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf11)
buf12 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mean]
triton_poi_fused_mean_6.run(buf8, buf12, 16, grid=grid(16), stream=stream0)
buf13 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_7.run(buf13, primals_1, primals_5, 4, 4, grid=grid(4, 4), stream=stream0)
del primals_5
buf14 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf15 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [src_1], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_8.run(buf13, buf14, buf15, 4, grid=grid(4), stream=stream0)
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src_1], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_9.run(buf13, buf14, buf15, primals_6, primals_7, buf16, 16, grid=grid(16), stream=stream0)
del primals_7
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf16, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf17)
buf18 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
triton_poi_fused_relu_10.run(buf18, primals_9, 16, grid=grid(16), stream=stream0)
del primals_9
buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf18, reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf19)
buf20 = buf19; del buf19 # reuse
# Topologically Sorted Source Nodes: [src_2], Original ATen: [aten.add]
triton_poi_fused_add_11.run(buf20, buf16, primals_11, 16, grid=grid(16), stream=stream0)
del primals_11
buf21 = buf15; del buf15 # reuse
buf22 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_8.run(buf20, buf21, buf22, 4, grid=grid(4), stream=stream0)
buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_9.run(buf20, buf21, buf22, primals_12, primals_13, buf23, 16, grid=grid(16), stream=stream0)
del buf21
del buf22
del primals_13
return (reinterpret_tensor(buf23, (4, 4), (1, 4), 0), reinterpret_tensor(buf12, (4, 4), (4, 1), 0), primals_6, primals_12, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), buf8, reinterpret_tensor(buf10, (4, 4), (4, 1), 0), buf13, buf16, buf18, buf20, primals_10, primals_8, primals_4, reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf4, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
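# Running this file directly benchmarks the compiled graph:
# benchmark_compiled_module materializes random strided inputs that satisfy
# the assert_size_stride guards in call() and reports timing through
# print_performance.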
| import torch
import torch.nn as nn
import torch.nn.functional as F
def get_activation_fn(activation):
if activation == 'relu':
return F.relu
elif activation == 'gelu':
return F.gelu
raise RuntimeError('activation should be relu/gelu, not {}'.format(
activation))
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation='relu'):
super(TransformerEncoderLayer, self).__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = get_activation_fn(activation)
def forward(self, src, src_mask=None, src_key_padding_mask=None):
src2, attn = self.self_attn(src, src, src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src, attn
class SelfAttnLayer(nn.Module):
def __init__(self, d_model, nhead=4, dropout=0.1):
super().__init__()
self.transformer_layer = TransformerEncoderLayer(d_model, nhead,
d_model * 1, dropout=dropout, activation='relu')
def forward(self, k, mask=None):
attn = None
k = k.transpose(0, 1)
x, attn = self.transformer_layer(k, src_mask=mask)
x = x.transpose(0, 1)
return x, attn
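# Example usage (illustrative; shapes follow get_inputs/get_init_inputs
# below):
#
#   layer = SelfAttnLayer(d_model=4)
#   x, attn = layer(torch.rand(4, 4))  # unbatched (seq_len, d_model) input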
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
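# The three kernels above add slices of the packed in_proj bias (length 12):
# offset 0 for the query projection (fused with the 1/sqrt(head_dim) scale,
# which is 1.0 here because d_model=4 with nhead=4 gives head_dim=1),
# offset 4 for the key, and offset 8 for the value.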
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_mean_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
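# mean_6 averages the per-head attention maps (nhead=4 heads of shape 4x4)
# over the head dimension, yielding the attention weights that
# nn.MultiheadAttention returns by default.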
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_out_ptr0, in_ptr0, in_ptr1,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tmp1 = tl.load(in_out_ptr0 + (x1 + 4 * y0), xmask & ymask)
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x1 + 4 * y0), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
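# add_11 fuses the linear2 bias with the second residual connection:
# src = norm1_out + (hidden @ W2^T + b2), i.e. the `src + src2` step that
# precedes norm2 in the eager layer.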
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), out=buf2)
del primals_2
buf3 = reinterpret_tensor(buf2, (4, 1, 4), (4, 4, 1), 0)
del buf2
get_raw_stream(0)
triton_poi_fused_add_0[grid(16)](buf3, primals_3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0)
del buf0
triton_poi_fused_mul_1[grid(16)](buf4, primals_3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0)
del buf1
triton_poi_fused_add_2[grid(16)](buf5, primals_3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_3
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf4, reinterpret_tensor(buf5, (4, 1, 4), (1, 0,
4), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf6, buf7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused__softmax_4[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf7
buf9 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf8, reinterpret_tensor(buf3, (4, 4, 1), (1, 4,
0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_5[grid(4, 4)](buf9, buf10, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (4, 4), (4, 1), 0)
del buf9
extern_kernels.mm(reinterpret_tensor(buf10, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf11)
buf12 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mean_6[grid(16)](buf8, buf12, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf13 = buf11
del buf11
triton_poi_fused_add_native_layer_norm_7[grid(4, 4)](buf13,
primals_1, primals_5, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1,
num_stages=1)
del primals_5
buf14 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf15 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_native_layer_norm_8[grid(4)](buf13, buf14, buf15,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_9[grid(16)](buf13, buf14, buf15,
primals_6, primals_7, buf16, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_7
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf16, reinterpret_tensor(primals_8, (4, 4), (1,
4), 0), out=buf17)
buf18 = buf17
del buf17
triton_poi_fused_relu_10[grid(16)](buf18, primals_9, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_9
buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf18, reinterpret_tensor(primals_10, (4, 4), (1,
4), 0), out=buf19)
buf20 = buf19
del buf19
triton_poi_fused_add_11[grid(16)](buf20, buf16, primals_11, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_11
buf21 = buf15
del buf15
buf22 = buf14
del buf14
triton_poi_fused_native_layer_norm_8[grid(4)](buf20, buf21, buf22,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_9[grid(16)](buf20, buf21, buf22,
primals_12, primals_13, buf23, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf21
del buf22
del primals_13
return (reinterpret_tensor(buf23, (4, 4), (1, 4), 0),
reinterpret_tensor(buf12, (4, 4), (4, 1), 0), primals_6, primals_12,
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), buf8,
reinterpret_tensor(buf10, (4, 4), (4, 1), 0), buf13, buf16, buf18,
buf20, primals_10, primals_8, primals_4, reinterpret_tensor(buf3, (
4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf4, (4, 1, 4), (1, 1,
4), 0), reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 1), 0))
def get_activation_fn(activation):
if activation == 'relu':
return F.relu
elif activation == 'gelu':
return F.gelu
raise RuntimeError('activation should be relu/gelu, not {}'.format(
activation))
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation='relu'):
super(TransformerEncoderLayer, self).__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = get_activation_fn(activation)
def forward(self, src, src_mask=None, src_key_padding_mask=None):
src2, attn = self.self_attn(src, src, src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src, attn
class SelfAttnLayerNew(nn.Module):
def __init__(self, d_model, nhead=4, dropout=0.1):
super().__init__()
self.transformer_layer = TransformerEncoderLayer(d_model, nhead,
d_model * 1, dropout=dropout, activation='relu')
def forward(self, input_0):
        primals_1 = input_0
        primals_2 = self.transformer_layer.self_attn.in_proj_weight
        primals_3 = self.transformer_layer.self_attn.in_proj_bias
        primals_4 = self.transformer_layer.self_attn.out_proj.weight
        primals_5 = self.transformer_layer.self_attn.out_proj.bias
        primals_6 = self.transformer_layer.norm1.weight
        primals_7 = self.transformer_layer.norm1.bias
        primals_8 = self.transformer_layer.linear1.weight
        primals_9 = self.transformer_layer.linear1.bias
        primals_10 = self.transformer_layer.linear2.weight
        primals_11 = self.transformer_layer.linear2.bias
        primals_12 = self.transformer_layer.norm2.weight
        primals_13 = self.transformer_layer.norm2.bias
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0], output[1]
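# output[0] is the transformed sequence (returned with transposed strides to
# undo the internal (0, 1) transpose) and output[1] the head-averaged
# attention weights, mirroring the (x, attn) pair of the eager SelfAttnLayer.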
| mensudza/C-Tran | SelfAttnLayer | false | 7,237 | [
"MIT"
] | 1 | 4895ccb0e675ae2dcd2b619a9e47f30707062668 | https://github.com/mensudza/C-Tran/tree/4895ccb0e675ae2dcd2b619a9e47f30707062668 | import torch
import torch.nn as nn
import torch.nn.functional as F
def get_activation_fn(activation):
if activation == 'relu':
return F.relu
elif activation == 'gelu':
return F.gelu
raise RuntimeError('activation should be relu/gelu, not {}'.format(
activation))
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation='relu'):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = get_activation_fn(activation)
def forward(self, src, src_mask=None, src_key_padding_mask=None):
src2, attn = self.self_attn(src, src, src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src, attn
class Model(nn.Module):
def __init__(self, d_model, nhead=4, dropout=0.1):
super().__init__()
self.transformer_layer = TransformerEncoderLayer(d_model, nhead,
d_model * 1, dropout=dropout, activation='relu')
def forward(self, k, mask=None):
attn = None
k = k.transpose(0, 1)
x, attn = self.transformer_layer(k, src_mask=mask)
x = x.transpose(0, 1)
return x, attn
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4]
|
depthwise_separable_conv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/cc/ccc3jyqbz5rjanlhmaez5ahwrmffxjcm5mibxaw5efspqo45e4re.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [4, 4], [1, 1], False, [0, 0], 4), kwargs = {})
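# Note (added): groups=4 with 4 input channels makes this the depthwise
# stage; with a 4x4 kernel and padding=4 the 4x4 input maps to
# (4 + 2*4 - 4) + 1 = 9, hence the (4, 4, 9, 9) output asserted below.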
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 81) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 1296, grid=grid(1296), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 9, 9), (324, 81, 9, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf3, primals_5, 1296, grid=grid(1296), stream=stream0)
del primals_5
return (buf3, primals_1, primals_3, primals_4, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class depthwise_separable_conv(torch.nn.Module):
def __init__(self, nin, nout, kernel_size, padding):
super(depthwise_separable_conv, self).__init__()
self.depthwise = nn.Conv2d(nin, nin, kernel_size=kernel_size,
padding=padding, groups=nin)
self.pointwise = nn.Conv2d(nin, nout, kernel_size=1)
def forward(self, x):
out = self.depthwise(x)
out = self.pointwise(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nin': 4, 'nout': 4, 'kernel_size': 4, 'padding': 4}]
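# Illustrative parameter count (added sketch; sizes are hypothetical): the
# depthwise + pointwise pair needs far fewer weights than a dense Conv2d
# with the same receptive field.
if __name__ == '__main__':
    sep = depthwise_separable_conv(nin=32, nout=64, kernel_size=3, padding=1)
    dense = nn.Conv2d(32, 64, kernel_size=3, padding=1)
    print(sum(p.numel() for p in sep.parameters()))    # 2432
    print(sum(p.numel() for p in dense.parameters()))  # 18496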
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
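    # Fused in-place bias add applied after the extern convolution: xnumel =
    # 1296 covers all 4 * 4 * 9 * 9 output elements, and x1 below picks the
    # output channel (81 = 9 * 9 spatial positions per channel).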
xnumel = 1296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 81 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
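        # Depthwise stage: grouped convolution (groups=4) via the extern
        # kernel; the Triton kernel then folds the bias in, in place.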
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(1296)](buf1, primals_2, 1296,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
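        # Pointwise stage: 1x1 convolution mixing channels, with the same
        # fused bias add reused on its output.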
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 9, 9), (324, 81, 9, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_0[grid(1296)](buf3, primals_5, 1296,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1
class depthwise_separable_convNew(torch.nn.Module):
def __init__(self, nin, nout, kernel_size, padding):
super(depthwise_separable_convNew, self).__init__()
self.depthwise = nn.Conv2d(nin, nin, kernel_size=kernel_size,
padding=padding, groups=nin)
self.pointwise = nn.Conv2d(nin, nout, kernel_size=1)
def forward(self, input_0):
primals_1 = self.depthwise.weight
primals_2 = self.depthwise.bias
primals_4 = self.pointwise.weight
primals_5 = self.pointwise.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
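# Hypothetical smoke test (added; assumes a CUDA device): both stages still
# dispatch to extern convolution kernels, so the wrapper should track an
# identically-initialized eager module closely.
if __name__ == '__main__':
    _m = depthwise_separable_convNew(4, 4, 4, 4).cuda()
    _x = torch.rand([4, 4, 4, 4], device='cuda')
    print(_m(_x).shape)  # torch.Size([4, 4, 9, 9])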
| mirayyuce/Neural-Architecture-Search | depthwise_separable_conv | false | 7,238 | [
"BSD-3-Clause"
] | 1 | e294816c85200f4301376c8b355634c6cca81816 | https://github.com/mirayyuce/Neural-Architecture-Search/tree/e294816c85200f4301376c8b355634c6cca81816 | import torch
import torch.nn as nn
class Model(torch.nn.Module):
def __init__(self, nin, nout, kernel_size, padding):
super().__init__()
self.depthwise = nn.Conv2d(nin, nin, kernel_size=kernel_size,
padding=padding, groups=nin)
self.pointwise = nn.Conv2d(nin, nout, kernel_size=1)
def forward(self, x):
out = self.depthwise(x)
out = self.pointwise(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4, 4]
|
BertPredictionHeadTransform | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/k6/ck6o2ucwdqtvjyw7bruyzgade2k6iruvl53t2wmqy2xkgypurpgf.py
# Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_1, u, sub, pow_1, s], Original ATen: [aten.mul, aten.div, aten.erf, aten.add, aten.mean, aten.sub, aten.pow]
# Source node to ATen node mapping:
# add => add
# erf => erf
# hidden_states_1 => mul_1
# mul => mul
# pow_1 => pow_1
# s => mean_1
# sub => sub
# truediv => div
# u => mean
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, 1.4142135623730951), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul_1, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %mean), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
triton_poi_fused_add_div_erf_mean_mul_pow_sub_0 = async_compile.triton('triton_poi_fused_add_div_erf_mean_mul_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_mean_mul_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_erf_mean_mul_pow_sub_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tmp10 = tmp9 * tmp1
tmp11 = tmp9 * tmp3
tmp12 = libdevice.erf(tmp11)
tmp13 = tmp12 + tmp6
tmp14 = tmp10 * tmp13
tmp15 = tmp8 + tmp14
tmp17 = tmp16 * tmp1
tmp18 = tmp16 * tmp3
tmp19 = libdevice.erf(tmp18)
tmp20 = tmp19 + tmp6
tmp21 = tmp17 * tmp20
tmp22 = tmp15 + tmp21
tmp24 = tmp23 * tmp1
tmp25 = tmp23 * tmp3
tmp26 = libdevice.erf(tmp25)
tmp27 = tmp26 + tmp6
tmp28 = tmp24 * tmp27
tmp29 = tmp22 + tmp28
tmp30 = 4.0
tmp31 = tmp29 / tmp30
tmp32 = tmp8 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tmp14 - tmp31
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp21 - tmp31
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp28 - tmp31
tmp41 = tmp40 * tmp40
tmp42 = tmp39 + tmp41
tmp43 = tmp42 / tmp30
tl.store(out_ptr0 + (x0), tmp31, xmask)
tl.store(out_ptr1 + (x0), tmp43, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ew/cewcb66a7hyf2vxy6evimdhxxg6p7casfhukvhbgdoijgab2kyck.py
# Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_1, sub, add_1, sqrt, x, mul_2, hidden_states_2], Original ATen: [aten.mul, aten.div, aten.erf, aten.add, aten.sub, aten.sqrt]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# erf => erf
# hidden_states_1 => mul_1
# hidden_states_2 => add_2
# mul => mul
# mul_2 => mul_2
# sqrt => sqrt
# sub => sub
# truediv => div
# x => div_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, 1.4142135623730951), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %mean), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %div_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_5), kwargs = {})
triton_poi_fused_add_div_erf_mul_sqrt_sub_1 = async_compile.triton('triton_poi_fused_add_div_erf_mul_sqrt_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_mul_sqrt_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_erf_mul_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp10 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = 0.7071067811865475
tmp5 = tmp1 * tmp4
tmp6 = libdevice.erf(tmp5)
tmp7 = 1.0
tmp8 = tmp6 + tmp7
tmp9 = tmp3 * tmp8
tmp11 = tmp9 - tmp10
tmp13 = tmp12 + tmp7
tmp14 = libdevice.sqrt(tmp13)
tmp15 = tmp11 / tmp14
tmp16 = tmp0 * tmp15
tmp18 = tmp16 + tmp17
tl.store(out_ptr0 + (x2), tmp18, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_1, u, sub, pow_1, s], Original ATen: [aten.mul, aten.div, aten.erf, aten.add, aten.mean, aten.sub, aten.pow]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_erf_mean_mul_pow_sub_0.run(buf0, buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_1, sub, add_1, sqrt, x, mul_2, hidden_states_2], Original ATen: [aten.mul, aten.div, aten.erf, aten.add, aten.sub, aten.sqrt]
triton_poi_fused_add_div_erf_mul_sqrt_sub_1.run(primals_4, buf0, buf1, buf2, primals_5, buf3, 256, grid=grid(256), stream=stream0)
del buf1
del buf2
del primals_5
return (buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
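# Worked value (added): gelu(x) = x * Phi(x), Phi being the standard normal
# CDF, so gelu(1.0) = 0.5 * (1 + erf(1 / sqrt(2))) = 0.5 * 1.6827 ≈ 0.8413.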
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
        """
        Construct a layernorm module in the TF style (epsilon inside the square root).
        """
        super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
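# Sanity sketch (added; not part of the original): with matching eps this
# agrees with torch.nn.LayerNorm, which likewise uses the biased variance
# and keeps eps inside the square root.
if __name__ == '__main__':
    _ln = BertLayerNorm(4, eps=1e-05)
    _ref = nn.LayerNorm(4, eps=1e-05)
    _z = torch.rand(2, 4)
    print((_ln(_z) - _ref(_z)).abs().max())  # ~0, up to float error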
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = gelu
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
def forward(self, hidden_states):
"""(N, L, D)"""
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, layer_norm_eps=1)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_erf_mean_mul_pow_sub_0(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
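    # One lane per length-4 row: apply the exact erf-based GELU to the four
    # activations, then emit the row mean (out_ptr0) and biased variance
    # (out_ptr1) that the LayerNorm stage consumes.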
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tmp10 = tmp9 * tmp1
tmp11 = tmp9 * tmp3
tmp12 = libdevice.erf(tmp11)
tmp13 = tmp12 + tmp6
tmp14 = tmp10 * tmp13
tmp15 = tmp8 + tmp14
tmp17 = tmp16 * tmp1
tmp18 = tmp16 * tmp3
tmp19 = libdevice.erf(tmp18)
tmp20 = tmp19 + tmp6
tmp21 = tmp17 * tmp20
tmp22 = tmp15 + tmp21
tmp24 = tmp23 * tmp1
tmp25 = tmp23 * tmp3
tmp26 = libdevice.erf(tmp25)
tmp27 = tmp26 + tmp6
tmp28 = tmp24 * tmp27
tmp29 = tmp22 + tmp28
tmp30 = 4.0
tmp31 = tmp29 / tmp30
tmp32 = tmp8 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tmp14 - tmp31
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp21 - tmp31
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp28 - tmp31
tmp41 = tmp40 * tmp40
tmp42 = tmp39 + tmp41
tmp43 = tmp42 / tmp30
tl.store(out_ptr0 + x0, tmp31, xmask)
tl.store(out_ptr1 + x0, tmp43, xmask)
@triton.jit
def triton_poi_fused_add_div_erf_mul_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
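    # Second stage: recompute GELU per element and normalize with the row
    # statistics, i.e. weight * (g - mean) / sqrt(var + 1.0) + bias; the
    # eps of 1.0 comes from the mock config's layer_norm_eps=1.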
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = 0.7071067811865475
tmp5 = tmp1 * tmp4
tmp6 = libdevice.erf(tmp5)
tmp7 = 1.0
tmp8 = tmp6 + tmp7
tmp9 = tmp3 * tmp8
tmp11 = tmp9 - tmp10
tmp13 = tmp12 + tmp7
tmp14 = libdevice.sqrt(tmp13)
tmp15 = tmp11 / tmp14
tmp16 = tmp0 * tmp15
tmp18 = tmp16 + tmp17
tl.store(out_ptr0 + x2, tmp18, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_erf_mean_mul_pow_sub_0[grid(64)](buf0,
buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_erf_mul_sqrt_sub_1[grid(256)](primals_4,
buf0, buf1, buf2, primals_5, buf3, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf1
del buf2
del primals_5
    return buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
        """
        Construct a layernorm module in the TF style (epsilon inside the square root).
        """
        super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertPredictionHeadTransformNew(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransformNew, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = gelu
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
def forward(self, input_0):
primals_1 = self.dense.weight
primals_2 = self.dense.bias
primals_4 = self.LayerNorm.weight
primals_5 = self.LayerNorm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| minjoong507/Image-Captioning-Transformer | BertPredictionHeadTransform | false | 7,239 | [
"MIT"
] | 1 | 813060f0bb656e336154173f11e99a80362c8c2a | https://github.com/minjoong507/Image-Captioning-Transformer/tree/813060f0bb656e336154173f11e99a80362c8c2a | from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
        """
        Construct a layernorm module in the TF style (epsilon inside the square root).
        """
        super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class Model(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = gelu
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
def forward(self, hidden_states):
"""(N, L, D)"""
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Router | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ez/cezmv74yrhrunjwqrletcmzzbnanma4ylsle3v7w345t7kxp622s.py
# Topologically Sorted Source Nodes: [u_hat], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# u_hat => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ts/cts7q6dfb3copgexqebefx4p456ecsgth6p6xmle2mmldywndoi3.py
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# s => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tmp1 = tl_math.exp(tmp0)
tmp2 = tmp1 + tmp1
tmp3 = tmp2 + tmp1
tmp4 = tmp3 + tmp1
tmp5 = tmp1 / tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
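# Note (added): with the routing logits initialized to zero, this kernel
# reduces to softmax(0) over 4 routes and simply fills the buffer with 0.25.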
# kernel path: runs/run_shard_4/inductor_cache/gu/cgurb3rc57bwl3qa722rpwfklrhzbjpi2aveuapolt7tdvfpdis7.py
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# s => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_8,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex % 4
x2 = (xindex // 4) % 4
x3 = (xindex // 16)
y0 = yindex
x4 = xindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1) + (16*x3) + (64*x2)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x4 + (64*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2t/c2t7stxsalfhsthweh45s4auxfz46xrbwkiajexefoyk5kfgiqco.py
# Topologically Sorted Source Nodes: [pow_1, s2, add, truediv, add_1, sqrt, truediv_1, v], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.sqrt, aten.mul]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# pow_1 => pow_1
# s2 => sum_2
# sqrt => sqrt
# truediv => div_1
# truediv_1 => div_2
# v => mul
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_7, 2), kwargs = {})
# %sum_2 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, 1), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, %add), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, 1e-08), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_7, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %div_2), kwargs = {})
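# Note (added): taken together these ops are the capsule-network "squash"
# nonlinearity, v = (|s|^2 / (1 + |s|^2)) * s / sqrt(|s|^2 + 1e-08), applied
# to each 4-element vector along the last dimension.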
triton_poi_fused_add_div_mul_pow_sqrt_sum_3 = async_compile.triton('triton_poi_fused_add_div_mul_pow_sqrt_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_sqrt_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp10 / tmp12
tmp15 = 1e-08
tmp16 = tmp10 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp14 / tmp17
tmp19 = tmp13 * tmp18
tl.store(out_ptr0 + (x2), tmp19, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/n3/cn3o76uutxwnkvtnqyvaxx7tnscbvuwvgsyd6kmrbucnocl6jr5o.py
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# a => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_13,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex % 4
x2 = (xindex // 4)
y0 = yindex
x3 = xindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3 + (64*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/j7/cj7mv2k5l2kigfluq2rwwpouckm4oow7jia7wwvjogp3qlr23xwv.py
# Topologically Sorted Source Nodes: [c_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# c_1 => amax_1, exp_1, sub_1
# Graph fragment:
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_11, [1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_11, %amax_1), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
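# Note (added): the kernel above computes exp(x - rowmax) over dim 1; the
# next kernel divides by the row sum, completing a numerically stable softmax
# of the routing logits.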
# kernel path: runs/run_shard_4/inductor_cache/vf/cvfvkiz4k3grvhzidjc6vivbeubtw7idhhjrnb6dlbg5vf7fihed.py
# Topologically Sorted Source Nodes: [c_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# c_1 => div_3, sum_3
# Graph fragment:
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_3), kwargs = {})
triton_poi_fused__softmax_6 = async_compile.triton('triton_poi_fused__softmax_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + ((4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2 + (4*y3)), tmp8, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/32/c32db6tn7hvzhdl4i5logasyamysp275l3si7xspyfcfakibqvvh.py
# Topologically Sorted Source Nodes: [s_1], Original ATen: [aten.bmm]
# Source node to ATen node mapping:
# s_1 => bmm_3
# Graph fragment:
# %bmm_3 : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%view_12, %view_5), kwargs = {})
triton_poi_fused_bmm_7 = async_compile.triton('triton_poi_fused_bmm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((4*x1) + (16*(x0 // 4)) + (x0 % 4)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dp/cdpsbcq6jlubfjxv6az2acdud37r3h5vv43z6ggwpbd74blne64h.py
# Topologically Sorted Source Nodes: [b_2, c_2], Original ATen: [aten.add, aten._softmax]
# Source node to ATen node mapping:
# b_2 => add_5
# c_2 => amax_2, exp_2, sub_2, sum_5
# Graph fragment:
# %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %view_19), kwargs = {})
# %amax_2 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_5, [1], True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %amax_2), kwargs = {})
# %exp_2 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [1], True), kwargs = {})
triton_poi_fused__softmax_add_8 = async_compile.triton('triton_poi_fused__softmax_add_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tl.store(out_ptr0 + (x0), tmp14, xmask)
tl.store(out_ptr1 + (x0), tmp25, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xv/cxvds37zsfn7me7vs5spzrzx36owr723owrhlinbklqa5b6xihbd.py
# Topologically Sorted Source Nodes: [b_2, c_2, s_2], Original ATen: [aten.add, aten._softmax, aten.clone]
# Source node to ATen node mapping:
# b_2 => add_5
# c_2 => amax_2, div_6, exp_2, sub_2
# s_2 => clone_7
# Graph fragment:
# %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %view_19), kwargs = {})
# %amax_2 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_5, [1], True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %amax_2), kwargs = {})
# %exp_2 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %div_6 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_2, %sum_5), kwargs = {})
# %clone_7 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_27,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused__softmax_add_clone_9 = async_compile.triton('triton_poi_fused__softmax_add_clone_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_clone_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_clone_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(out_ptr0 + (x2), tmp7, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/z2/cz245egjx7g4j7mn3wb3p6jf5sptc7kpicetafs56orxfbjbs2fy.py
# Topologically Sorted Source Nodes: [b_2, b_3, c_3], Original ATen: [aten.add, aten._softmax]
# Source node to ATen node mapping:
# b_2 => add_5
# b_3 => add_8
# c_3 => amax_3, exp_3, sub_3, sum_7
# Graph fragment:
# %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %view_19), kwargs = {})
# %add_8 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %view_27), kwargs = {})
# %amax_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_8, [1], True), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_8, %amax_3), kwargs = {})
# %exp_3 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
# %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_3, [1], True), kwargs = {})
triton_poi_fused__softmax_add_10 = async_compile.triton('triton_poi_fused__softmax_add_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp4, tmp9)
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp10, tmp15)
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = triton_helpers.maximum(tmp16, tmp21)
tmp23 = tmp4 - tmp22
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp9 - tmp22
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tmp15 - tmp22
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tmp31 = tmp21 - tmp22
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tl.store(out_ptr0 + (x0), tmp22, xmask)
tl.store(out_ptr1 + (x0), tmp33, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ux/cux3mtyatkeh2xctjovuxyuzb6jrzez2afgyw5oaihrzojk4rxa5.py
# Topologically Sorted Source Nodes: [b_2, b_3, c_3, s_3], Original ATen: [aten.add, aten._softmax, aten.clone]
# Source node to ATen node mapping:
# b_2 => add_5
# b_3 => add_8
# c_3 => div_9, exp_3, sub_3
# s_3 => clone_10
# Graph fragment:
# %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %view_19), kwargs = {})
# %add_8 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %view_27), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_8, %amax_3), kwargs = {})
# %exp_3 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
# %div_9 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_3, %sum_7), kwargs = {})
# %clone_10 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_37,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused__softmax_add_clone_11 = async_compile.triton('triton_poi_fused__softmax_add_clone_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_clone_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_clone_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp5 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(in_out_ptr0 + (x2), tmp9, xmask)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/jn/cjnqdeixskhije623cnhbvm5o2lfq2nkhcelbbsiuctormqu3kg7.py
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
# Source node to ATen node mapping:
# Graph fragment:
# %permute_74 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_12, [0, 2, 1]), kwargs = {})
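# Note: this kernel only materializes a transposed copy of the first-iteration
# softmax output (buf9); call() returns it (as buf27) among the tensors saved
# for the backward pass, presumably so the bmm backward can reuse it without
# re-permuting.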
triton_poi_fused_transpose_12 = async_compile.triton('triton_poi_fused_transpose_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_transpose_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_transpose_12(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + ((4*x1) + (16*(y0 // 4)) + (y0 % 4)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [u_hat], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [u_hat], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 16, 4), (64, 4, 1), 0), reinterpret_tensor(primals_2, (4, 4, 4), (4, 1, 16), 0), out=buf1)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf2, 64, grid=grid(64), stream=stream0)
buf3 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf1, buf3, 4, 64, grid=grid(4, 64), stream=stream0)
buf4 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, s2, add, truediv, add_1, sqrt, truediv_1, v], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.sqrt, aten.mul]
triton_poi_fused_add_div_mul_pow_sqrt_sum_3.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf1, buf6, 4, 64, grid=grid(4, 64), stream=stream0)
del buf1
buf7 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
# Topologically Sorted Source Nodes: [c_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [c_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_6.run(buf8, buf9, 16, 4, grid=grid(16, 4), stream=stream0)
buf10 = reinterpret_tensor(buf8, (16, 1, 4), (1, 64, 16), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [s_1], Original ATen: [aten.bmm]
triton_poi_fused_bmm_7.run(buf9, buf10, 64, grid=grid(64), stream=stream0)
buf11 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [s_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf10, reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf11)
buf12 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [pow_2, s2_1, add_3, truediv_2, add_4, sqrt_1, truediv_3, v_1], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.sqrt, aten.mul]
triton_poi_fused_add_div_mul_pow_sqrt_sum_3.run(buf11, buf12, 64, grid=grid(64), stream=stream0)
buf13 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [a_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf12, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf13)
buf14 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf15 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [b_2, c_2], Original ATen: [aten.add, aten._softmax]
triton_poi_fused__softmax_add_8.run(buf7, buf13, buf14, buf15, 16, grid=grid(16), stream=stream0)
buf16 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
buf17 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [b_2, c_2, s_2], Original ATen: [aten.add, aten._softmax, aten.clone]
triton_poi_fused__softmax_add_clone_9.run(buf7, buf13, buf14, buf15, buf16, buf17, 64, grid=grid(64), stream=stream0)
buf18 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [s_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf17, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf18)
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_3, s2_2, add_6, truediv_4, add_7, sqrt_2, truediv_5, v_2], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.sqrt, aten.mul]
triton_poi_fused_add_div_mul_pow_sqrt_sum_3.run(buf18, buf19, 64, grid=grid(64), stream=stream0)
buf20 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [a_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf19, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf20)
buf21 = buf15; del buf15 # reuse
buf22 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [b_2, b_3, c_3], Original ATen: [aten.add, aten._softmax]
triton_poi_fused__softmax_add_10.run(buf7, buf13, buf20, buf21, buf22, 16, grid=grid(16), stream=stream0)
buf23 = reinterpret_tensor(buf13, (4, 4, 4), (16, 1, 4), 0); del buf13 # reuse
buf24 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [b_2, b_3, c_3, s_3], Original ATen: [aten.add, aten._softmax, aten.clone]
triton_poi_fused__softmax_add_clone_11.run(buf23, buf7, buf20, buf21, buf22, buf24, 64, grid=grid(64), stream=stream0)
del buf21
del buf22
buf25 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [s_3], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf24, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf25)
buf26 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_4, s2_3, add_9, truediv_6, add_10, sqrt_3, truediv_7, v_3], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.sqrt, aten.mul]
triton_poi_fused_add_div_mul_pow_sqrt_sum_3.run(buf25, buf26, 64, grid=grid(64), stream=stream0)
buf27 = empty_strided_cuda((16, 4, 1), (4, 1, 4), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose]
triton_poi_fused_transpose_12.run(buf9, buf27, 16, 4, grid=grid(16, 4), stream=stream0)
del buf9
return (buf26, buf4, buf7, buf11, buf16, buf18, buf23, buf25, reinterpret_tensor(buf24, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf19, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf17, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf12, (16, 4, 1), (4, 1, 4), 0), buf27, reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(primals_2, (4, 4, 4), (4, 16, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
class Squash(Module):
    r"""
    ## Squash

    This is the **squashing** function from the paper, given by equation $(1)$:

    $$\mathbf{v}_j = \frac{{\lVert \mathbf{s}_j \rVert}^2}{1 + {\lVert \mathbf{s}_j \rVert}^2}
    \frac{\mathbf{s}_j}{\lVert \mathbf{s}_j \rVert}$$

    $\frac{\mathbf{s}_j}{\lVert \mathbf{s}_j \rVert}$ normalizes the length of
    all the capsules, whilst
    $\frac{{\lVert \mathbf{s}_j \rVert}^2}{1 + {\lVert \mathbf{s}_j \rVert}^2}$
    shrinks the capsules that have a length smaller than one.
    """
def __init__(self, epsilon=1e-08):
super().__init__()
self.epsilon = epsilon
def forward(self, s: 'torch.Tensor'):
"""
The shape of `s` is `[batch_size, n_capsules, n_features]`
"""
s2 = (s ** 2).sum(dim=-1, keepdims=True)
return s2 / (1 + s2) * (s / torch.sqrt(s2 + self.epsilon))
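# Illustrative check (not part of the original module): an all-ones capsule of
# 3 features has ||s||^2 = 3, so Squash scales each component by
# (3 / 4) / sqrt(3 + eps) ~= 0.433, giving ||v|| ~= 0.75 < 1:
#
#     v = Squash()(torch.ones(1, 2, 3))
#     assert torch.allclose(v.norm(dim=-1), torch.full((1, 2), 0.75), atol=1e-4)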
class Router(Module):
"""
## Routing Algorithm
This is the routing mechanism described in the paper.
You can use multiple routing layers in your models.
This combines calculating $\\mathbf{s}_j$ for this layer and
the routing algorithm described in *Procedure 1*.
"""
def __init__(self, in_caps: 'int', out_caps: 'int', in_d: 'int', out_d:
'int', iterations: 'int'):
"""
`in_caps` is the number of capsules, and `in_d` is the number of features per capsule from the layer below.
`out_caps` and `out_d` are the same for this layer.
`iterations` is the number of routing iterations, symbolized by $r$ in the paper.
"""
super().__init__()
self.in_caps = in_caps
self.out_caps = out_caps
self.iterations = iterations
self.softmax = nn.Softmax(dim=1)
self.squash = Squash()
self.weight = nn.Parameter(torch.randn(in_caps, out_caps, in_d,
out_d), requires_grad=True)
def forward(self, u: 'torch.Tensor'):
"""
The shape of `u` is `[batch_size, n_capsules, n_features]`.
These are the capsules from the lower layer.
"""
u_hat = torch.einsum('ijnm,bin->bijm', self.weight, u)
b = u.new_zeros(u.shape[0], self.in_caps, self.out_caps)
v = None
for i in range(self.iterations):
c = self.softmax(b)
s = torch.einsum('bij,bijm->bjm', c, u_hat)
v = self.squash(s)
a = torch.einsum('bjm,bijm->bij', v, u_hat)
b = b + a
return v
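# Shape sketch for the routing loop above (using the sizes from
# get_init_inputs/get_inputs below): u is [4, 4, 4], u_hat is [4, 4, 4, 4]
# ([batch, in_caps, out_caps, out_d]), and b is [4, 4, 4]; each iteration
# takes c = softmax(b, dim=1) over in_caps, forms
# s[b, j] = sum_i c[b, i, j] * u_hat[b, i, j], squashes it, and adds the
# agreement <v_j, u_hat_ij> back into b.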
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_caps': 4, 'out_caps': 4, 'in_d': 4, 'out_d': 4,
'iterations': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn import Module
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
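    # b starts as zeros, so the first-iteration softmax(b, dim=1) is a
    # constant folded here: exp(0) / (4 * exp(0)) = 0.25, with no input read.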
tmp0 = 0.0
tmp1 = tl_math.exp(tmp0)
tmp2 = tmp1 + tmp1
tmp3 = tmp2 + tmp1
tmp4 = tmp3 + tmp1
tmp5 = tmp1 / tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex % 4
x2 = xindex // 4 % 4
x3 = xindex // 16
y0 = yindex
x4 = xindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1 + 16 * x3 + 64 * x2), xmask &
ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x4 + 64 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_3(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + x2, xmask)
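    # tmp1..tmp10 accumulate ||s||^2 over the last dim of size 4; tmp13 is
    # ||s||^2 / (1 + ||s||^2) and tmp18 is s / sqrt(||s||^2 + 1e-8), so tmp19
    # matches Squash.forward fused into one elementwise kernel.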
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp10 / tmp12
tmp15 = 1e-08
tmp16 = tmp10 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp14 / tmp17
tmp19 = tmp13 * tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex % 4
x2 = xindex // 4
y0 = yindex
x3 = xindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * x1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x3 + 64 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2 + 4 * y3), tmp8, xmask & ymask)
@triton.jit
def triton_poi_fused_bmm_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x1 + 16 * (x0 // 4) + x0 % 4), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_add_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tl.store(out_ptr0 + x0, tmp14, xmask)
tl.store(out_ptr1 + x0, tmp25, xmask)
@triton.jit
def triton_poi_fused__softmax_add_clone_9(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused__softmax_add_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp4, tmp9)
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = triton_helpers.maximum(tmp10, tmp15)
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = triton_helpers.maximum(tmp16, tmp21)
tmp23 = tmp4 - tmp22
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp9 - tmp22
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tmp15 - tmp22
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tmp31 = tmp21 - tmp22
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tl.store(out_ptr0 + x0, tmp22, xmask)
tl.store(out_ptr1 + x0, tmp33, xmask)
@triton.jit
def triton_poi_fused__softmax_add_clone_11(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tl.store(in_out_ptr0 + x2, tmp9, xmask)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_transpose_12(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (4 * x1 + 16 * (y0 // 4) + y0 % 4), xmask &
ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch
.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf0, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 16, 4), (64, 4, 1),
0), reinterpret_tensor(primals_2, (4, 4, 4), (4, 1, 16), 0),
out=buf1)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_1[grid(64)](buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf3 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_clone_2[grid(4, 64)](buf1, buf3, 4, 64, XBLOCK=32,
YBLOCK=4, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 1, 4), (4, 0, 1),
0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mul_pow_sqrt_sum_3[grid(64)](buf4, buf5,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(4, 64)](buf1, buf6, 4, 64, XBLOCK=32,
YBLOCK=4, num_warps=4, num_stages=1)
del buf1
buf7 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1),
0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
triton_poi_fused__softmax_5[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_6[grid(16, 4)](buf8, buf9, 16, 4, XBLOCK=
4, YBLOCK=16, num_warps=1, num_stages=1)
buf10 = reinterpret_tensor(buf8, (16, 1, 4), (1, 64, 16), 0)
del buf8
triton_poi_fused_bmm_7[grid(64)](buf9, buf10, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(buf10, reinterpret_tensor(buf3, (16, 4, 4), (16,
4, 1), 0), out=buf11)
buf12 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0)
del buf10
triton_poi_fused_add_div_mul_pow_sqrt_sum_3[grid(64)](buf11, buf12,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf13 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf12, (16, 1, 4), (4, 0, 1),
0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf13)
buf14 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf15 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
triton_poi_fused__softmax_add_8[grid(16)](buf7, buf13, buf14, buf15,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
buf17 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused__softmax_add_clone_9[grid(64)](buf7, buf13, buf14,
buf15, buf16, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf17, (16, 1, 4), (4, 0, 1),
0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf18)
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mul_pow_sqrt_sum_3[grid(64)](buf18, buf19,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf20 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf19, (16, 1, 4), (4, 0, 1),
0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf20)
buf21 = buf15
del buf15
buf22 = buf14
del buf14
triton_poi_fused__softmax_add_10[grid(16)](buf7, buf13, buf20,
buf21, buf22, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf23 = reinterpret_tensor(buf13, (4, 4, 4), (16, 1, 4), 0)
del buf13
buf24 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused__softmax_add_clone_11[grid(64)](buf23, buf7, buf20,
buf21, buf22, buf24, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf21
del buf22
buf25 = buf20
del buf20
extern_kernels.bmm(reinterpret_tensor(buf24, (16, 1, 4), (4, 0, 1),
0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf25)
buf26 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mul_pow_sqrt_sum_3[grid(64)](buf25, buf26,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf27 = empty_strided_cuda((16, 4, 1), (4, 1, 4), torch.float32)
triton_poi_fused_transpose_12[grid(16, 4)](buf9, buf27, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del buf9
return (buf26, buf4, buf7, buf11, buf16, buf18, buf23, buf25,
reinterpret_tensor(buf24, (16, 4, 1), (4, 1, 4), 0),
reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf19, (16, 4, 1), (4, 1, 4), 0),
reinterpret_tensor(buf6, (16, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf17, (16, 4, 1), (4, 1, 4), 0),
reinterpret_tensor(buf12, (16, 4, 1), (4, 1, 4), 0), buf27,
reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0),
reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 4), 0),
reinterpret_tensor(primals_2, (4, 4, 4), (4, 16, 1), 0))
class Squash(Module):
    r"""
    ## Squash

    This is the **squashing** function from the paper, given by equation $(1)$:

    $$\mathbf{v}_j = \frac{{\lVert \mathbf{s}_j \rVert}^2}{1 + {\lVert \mathbf{s}_j \rVert}^2}
    \frac{\mathbf{s}_j}{\lVert \mathbf{s}_j \rVert}$$

    $\frac{\mathbf{s}_j}{\lVert \mathbf{s}_j \rVert}$ normalizes the length of
    all the capsules, whilst
    $\frac{{\lVert \mathbf{s}_j \rVert}^2}{1 + {\lVert \mathbf{s}_j \rVert}^2}$
    shrinks the capsules that have a length smaller than one.
    """
def __init__(self, epsilon=1e-08):
super().__init__()
self.epsilon = epsilon
def forward(self, s: 'torch.Tensor'):
"""
The shape of `s` is `[batch_size, n_capsules, n_features]`
"""
s2 = (s ** 2).sum(dim=-1, keepdims=True)
return s2 / (1 + s2) * (s / torch.sqrt(s2 + self.epsilon))
class RouterNew(Module):
"""
## Routing Algorithm
This is the routing mechanism described in the paper.
You can use multiple routing layers in your models.
This combines calculating $\\mathbf{s}_j$ for this layer and
the routing algorithm described in *Procedure 1*.
"""
def __init__(self, in_caps: 'int', out_caps: 'int', in_d: 'int', out_d:
'int', iterations: 'int'):
"""
`in_caps` is the number of capsules, and `in_d` is the number of features per capsule from the layer below.
`out_caps` and `out_d` are the same for this layer.
`iterations` is the number of routing iterations, symbolized by $r$ in the paper.
"""
super().__init__()
self.in_caps = in_caps
self.out_caps = out_caps
self.iterations = iterations
self.softmax = nn.Softmax(dim=1)
self.squash = Squash()
self.weight = nn.Parameter(torch.randn(in_caps, out_caps, in_d,
out_d), requires_grad=True)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| mcx/annotated_deep_learning_paper_implementations | Router | false | 7,240 | [
"MIT"
] | 1 | f169f3a71dd2d36eb28ad31062d3475efa367b88 | https://github.com/mcx/annotated_deep_learning_paper_implementations/tree/f169f3a71dd2d36eb28ad31062d3475efa367b88 | from torch.nn import Module
import torch
from torch import nn
import torch.utils.data
import torch.nn.functional
import torch.autograd
class Squash(Module):
    r"""
    ## Squash

    This is the **squashing** function from the paper, given by equation $(1)$:

    $$\mathbf{v}_j = \frac{{\lVert \mathbf{s}_j \rVert}^2}{1 + {\lVert \mathbf{s}_j \rVert}^2}
    \frac{\mathbf{s}_j}{\lVert \mathbf{s}_j \rVert}$$

    $\frac{\mathbf{s}_j}{\lVert \mathbf{s}_j \rVert}$ normalizes the length of
    all the capsules, whilst
    $\frac{{\lVert \mathbf{s}_j \rVert}^2}{1 + {\lVert \mathbf{s}_j \rVert}^2}$
    shrinks the capsules that have a length smaller than one.
    """
def __init__(self, epsilon=1e-08):
super().__init__()
self.epsilon = epsilon
def forward(self, s: 'torch.Tensor'):
"""
The shape of `s` is `[batch_size, n_capsules, n_features]`
"""
s2 = (s ** 2).sum(dim=-1, keepdims=True)
return s2 / (1 + s2) * (s / torch.sqrt(s2 + self.epsilon))
class Model(Module):
"""
## Routing Algorithm
This is the routing mechanism described in the paper.
You can use multiple routing layers in your models.
This combines calculating $\\mathbf{s}_j$ for this layer and
the routing algorithm described in *Procedure 1*.
"""
def __init__(self, in_caps: 'int', out_caps: 'int', in_d: 'int', out_d:
'int', iterations: 'int'):
"""
`in_caps` is the number of capsules, and `in_d` is the number of features per capsule from the layer below.
`out_caps` and `out_d` are the same for this layer.
`iterations` is the number of routing iterations, symbolized by $r$ in the paper.
"""
super().__init__()
self.in_caps = in_caps
self.out_caps = out_caps
self.iterations = iterations
self.softmax = nn.Softmax(dim=1)
self.squash = Squash()
self.weight = nn.Parameter(torch.randn(in_caps, out_caps, in_d,
out_d), requires_grad=True)
def forward(self, u: 'torch.Tensor'):
"""
The shape of `u` is `[batch_size, n_capsules, n_features]`.
These are the capsules from the lower layer.
"""
u_hat = torch.einsum('ijnm,bin->bijm', self.weight, u)
b = u.new_zeros(u.shape[0], self.in_caps, self.out_caps)
v = None
for i in range(self.iterations):
c = self.softmax(b)
s = torch.einsum('bij,bijm->bjm', c, u_hat)
v = self.squash(s)
a = torch.einsum('bjm,bijm->bij', v, u_hat)
b = b + a
return v
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_caps': 4, 'out_caps': 4, 'in_d': 4, 'out_d': 4,
'iterations': 4}]
|
Pointer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ku/ckutxoa3tdkp4vgpaf6cdwo3umpfmhw4aepnimgnqqvfrbw2wcgq.py
# Topologically Sorted Source Nodes: [X1, X2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# X1 => cat
# X2 => cat_1
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_3], 1), kwargs = {})
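# Note: both concatenations share M1 (primals_1) as their first half along
# dim=1, so one kernel writes X1 and X2 together: channel indices x1 < 4 copy
# M1, and indices x1 >= 4 copy M2 into out_ptr0 and M3 into out_ptr1.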
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 8
x0 = xindex % 4
x2 = (xindex // 32)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (4*((-4) + x1)) + (16*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp11 = tl.load(in_ptr2 + (x0 + (4*((-4) + x1)) + (16*x2)), tmp6 & xmask, other=0.0)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + (x3), tmp10, xmask)
tl.store(out_ptr1 + (x3), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/lj/cljrqgtjvj3sotyhumpepmt4by4ntzixml6oyfutn3hxxwv4cfyj.py
# Topologically Sorted Source Nodes: [mul, sub, mul_1, Y1, mul_2, Y2], Original ATen: [aten.mul, aten.rsub, aten.add]
# Source node to ATen node mapping:
# Y1 => add
# Y2 => add_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# sub => sub
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, %primals_5), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %primals_5), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, -1e+30), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, %primals_5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_1), kwargs = {})
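# Note: this is mask_logits() fused for both pointers: the shared
# (1 - mask) * -1e30 term is computed once (tmp6 in the kernel below) and
# added to both masked logit tensors Y1 and Y2.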
triton_poi_fused_add_mul_rsub_1 = async_compile.triton('triton_poi_fused_add_mul_rsub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp8 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp1
tmp5 = -1e+30
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp8 * tmp1
tmp10 = tmp9 + tmp6
tl.store(out_ptr0 + (x2), tmp7, xmask)
tl.store(out_ptr1 + (x2), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (1, 8, 1), (8, 1, 1))
assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_6, (1, 8, 1), (8, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [X1, X2], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, primals_3, buf0, buf1, 128, grid=grid(128), stream=stream0)
del primals_1
del primals_2
del primals_3
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf0, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 4), (4, 4, 1))
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf1, primals_6, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 1, 4), (4, 4, 1))
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sub, mul_1, Y1, mul_2, Y2], Original ATen: [aten.mul, aten.rsub, aten.add]
triton_poi_fused_add_mul_rsub_1.run(buf2, primals_5, buf4, buf3, buf5, 64, grid=grid(64), stream=stream0)
del buf2
del buf4
return (buf3, buf5, primals_4, primals_5, primals_6, buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 8, 1), (8, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 8, 1), (8, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def mask_logits(target, mask):
mask = mask.type(torch.float32)
return target * mask + (1 - mask) * -1e+30
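# Example (hypothetical values): with target = [0.2, 0.5] and mask = [1., 0.],
# mask_logits returns [0.2, -1e30], so a downstream softmax puts essentially
# zero probability on the masked position.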
class Initialized_Conv1d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=0, groups=1, relu=False, bias=False):
super().__init__()
self.out = nn.Conv1d(in_channels, out_channels, kernel_size, stride
=stride, padding=padding, groups=groups, bias=bias)
if relu is True:
self.relu = True
nn.init.kaiming_normal_(self.out.weight, nonlinearity='relu')
else:
self.relu = False
nn.init.xavier_uniform_(self.out.weight)
def forward(self, x):
if self.relu is True:
return nn.functional.relu(self.out(x))
else:
return self.out(x)
class Pointer(nn.Module):
def __init__(self, d_model):
super().__init__()
self.w1 = Initialized_Conv1d(d_model * 2, 1)
self.w2 = Initialized_Conv1d(d_model * 2, 1)
def forward(self, M1, M2, M3, mask):
X1 = torch.cat([M1, M2], dim=1)
X2 = torch.cat([M1, M3], dim=1)
Y1 = mask_logits(self.w1(X1).squeeze(), mask)
Y2 = mask_logits(self.w2(X2).squeeze(), mask)
return Y1, Y2
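# Shape walk-through (added comment; shapes follow get_inputs below): with
# M1, M2, M3 of shape (B, d_model, L), each torch.cat gives (B, 2*d_model, L),
# the kernel-size-1 Conv1d maps it to (B, 1, L), and squeeze() leaves (B, L)
# start/end logits, which mask_logits then broadcasts against the mask.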
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 8
x0 = xindex % 4
x2 = xindex // 32
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp11 = tl.load(in_ptr2 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp6 & xmask,
other=0.0)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + x3, tmp10, xmask)
tl.store(out_ptr1 + x3, tmp12, xmask)
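# Added note: the kernel above performs both concatenations in one pass over
# the (4, 8, 4) outputs. Channel index x1 < 4 selects rows of M1 (in_ptr0);
# x1 >= 4 selects M2 (in_ptr1) for out_ptr0 and M3 (in_ptr2) for out_ptr1,
# so X1 = [M1; M2] and X2 = [M1; M3] share a single read of M1.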
@triton.jit
def triton_poi_fused_add_mul_rsub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp8 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp1
tmp5 = -1e+30
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp8 * tmp1
tmp10 = tmp9 + tmp6
tl.store(out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr1 + x2, tmp10, xmask)
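# Added note: in_ptr0 and in_ptr2 hold the squeezed (4, 4) conv outputs (16
# elements each), while the mask in in_ptr1 has 64; indexing the logits with
# x0 = xindex % 16 realizes the PyTorch broadcast of a (4, 4) tensor against
# the (4, 4, 4) mask inside mask_logits.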
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (1, 8, 1), (8, 1, 1))
assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_6, (1, 8, 1), (8, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, primals_2, primals_3,
buf0, buf1, 128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
del primals_3
buf2 = extern_kernels.convolution(buf0, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 4), (4, 4, 1))
buf4 = extern_kernels.convolution(buf1, primals_6, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 1, 4), (4, 4, 1))
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_1[grid(64)](buf2, primals_5, buf4,
buf3, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf2
del buf4
return buf3, buf5, primals_4, primals_5, primals_6, buf0, buf1
def mask_logits(target, mask):
mask = mask.type(torch.float32)
return target * mask + (1 - mask) * -1e+30
class Initialized_Conv1d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=0, groups=1, relu=False, bias=False):
super().__init__()
self.out = nn.Conv1d(in_channels, out_channels, kernel_size, stride
=stride, padding=padding, groups=groups, bias=bias)
if relu is True:
self.relu = True
nn.init.kaiming_normal_(self.out.weight, nonlinearity='relu')
else:
self.relu = False
nn.init.xavier_uniform_(self.out.weight)
def forward(self, x):
if self.relu is True:
return nn.functional.relu(self.out(x))
else:
return self.out(x)
class PointerNew(nn.Module):
def __init__(self, d_model):
super().__init__()
self.w1 = Initialized_Conv1d(d_model * 2, 1)
self.w2 = Initialized_Conv1d(d_model * 2, 1)
def forward(self, input_0, input_1, input_2, input_3):
primals_4 = self.w1.out.weight
primals_6 = self.w2.out.weight
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
primals_5 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
 | mirbostani/QA-KD-AL | Pointer | false | 7,241 | ["MIT"] | 1 | 0ec8756ee06ae2a204a5e9110503bc697e9108fb | https://github.com/mirbostani/QA-KD-AL/tree/0ec8756ee06ae2a204a5e9110503bc697e9108fb | import torch
import torch.nn as nn
def mask_logits(target, mask):
mask = mask.type(torch.float32)
return target * mask + (1 - mask) * -1e+30
class Initialized_Conv1d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=0, groups=1, relu=False, bias=False):
super().__init__()
self.out = nn.Conv1d(in_channels, out_channels, kernel_size, stride
=stride, padding=padding, groups=groups, bias=bias)
if relu is True:
self.relu = True
nn.init.kaiming_normal_(self.out.weight, nonlinearity='relu')
else:
self.relu = False
nn.init.xavier_uniform_(self.out.weight)
def forward(self, x):
if self.relu is True:
return nn.functional.relu(self.out(x))
else:
return self.out(x)
class Model(nn.Module):
def __init__(self, d_model):
super().__init__()
self.w1 = Initialized_Conv1d(d_model * 2, 1)
self.w2 = Initialized_Conv1d(d_model * 2, 1)
def forward(self, M1, M2, M3, mask):
X1 = torch.cat([M1, M2], dim=1)
X2 = torch.cat([M1, M3], dim=1)
Y1 = mask_logits(self.w1(X1).squeeze(), mask)
Y2 = mask_logits(self.w2(X2).squeeze(), mask)
return Y1, Y2
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
SSIM | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/kc/ckc5ftdtcxbcmqsxx57xrul2upk2ygczpppolriskw2ya5alzvp2.py
# Topologically Sorted Source Nodes: [x, y, mul], Original ATen: [aten.reflection_pad2d, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# x => _unsafe_index, _unsafe_index_1
# y => _unsafe_index_2, _unsafe_index_3
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=3] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_3]), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg1_1, [None, None, %sub_5, None]), kwargs = {})
# %_unsafe_index_3 : [num_users=3] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_7]), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_1, %_unsafe_index_3), kwargs = {})
triton_poi_fused_mul_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_mul_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_reflection_pad2d_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
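# Added note on the index arithmetic above: for a 4x4 input with pad=1, the
# load offset 15 - |-3 + |-1 + x0|| - 4*|-3 + |-1 + x1|| + 16*x2 folds the
# reflection padding into pure integer math: padded column x0 maps to source
# column 3 - |-3 + |x0 - 1|| (e.g. x0=0 -> 1, x0=1 -> 0, x0=5 -> 2), and rows
# are handled symmetrically, so no boundary branches are needed.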
# kernel path: runs/run_shard_4/inductor_cache/lo/clow3kzmb7jev264jm5hdsre6kicqfeascpf6ijglocrlea2sell.py
# Topologically Sorted Source Nodes: [x, mu_x, mul_2, y, mu_y, mul_3, add, mul, avg_pool2d_4, mul_1, sigma_xy, mul_4, add_1, SSIM_n, pow_5, pow_6, add_2, add_3, pow_1, avg_pool2d_2, pow_2, sigma_x, pow_3, avg_pool2d_3, pow_4, sigma_y, add_4, add_5, SSIM_d, truediv, sub_3, truediv_1, clamp], Original ATen: [aten.reflection_pad2d, aten.avg_pool2d, aten.mul, aten.add, aten.sub, aten.pow, aten.div, aten.rsub, aten.clamp]
# Source node to ATen node mapping:
# SSIM_d => mul_6
# SSIM_n => mul_5
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# add_4 => add_4
# add_5 => add_5
# avg_pool2d_2 => avg_pool2d_2
# avg_pool2d_3 => avg_pool2d_3
# avg_pool2d_4 => avg_pool2d_4
# clamp => clamp_max, clamp_min
# mu_x => avg_pool2d
# mu_y => avg_pool2d_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# pow_1 => pow_1
# pow_2 => pow_2
# pow_3 => pow_3
# pow_4 => pow_4
# pow_5 => pow_5
# pow_6 => pow_6
# sigma_x => sub_8
# sigma_xy => sub_10
# sigma_y => sub_9
# sub_3 => sub_11
# truediv => div
# truediv_1 => div_1
# x => _unsafe_index, _unsafe_index_1
# y => _unsafe_index_2, _unsafe_index_3
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=3] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_3]), kwargs = {})
# %avg_pool2d : [num_users=4] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%_unsafe_index_1, [3, 3], [1, 1]), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%avg_pool2d, 2), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg1_1, [None, None, %sub_5, None]), kwargs = {})
# %_unsafe_index_3 : [num_users=3] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_7]), kwargs = {})
# %avg_pool2d_1 : [num_users=4] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%_unsafe_index_3, [3, 3], [1, 1]), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %avg_pool2d_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, 0.0001), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_1, %_unsafe_index_3), kwargs = {})
# %avg_pool2d_4 : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%mul, [3, 3], [1, 1]), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%avg_pool2d, %avg_pool2d_1), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%avg_pool2d_4, %mul_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 0.0009), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %add_1), kwargs = {})
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%avg_pool2d, 2), kwargs = {})
# %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%avg_pool2d_1, 2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_5, %pow_6), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, 0.0001), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%_unsafe_index_1, 2), kwargs = {})
# %avg_pool2d_2 : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%pow_1, [3, 3], [1, 1]), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%avg_pool2d, 2), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%avg_pool2d_2, %pow_2), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%_unsafe_index_3, 2), kwargs = {})
# %avg_pool2d_3 : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%pow_3, [3, 3], [1, 1]), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%avg_pool2d_1, 2), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%avg_pool2d_3, %pow_4), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_8, %sub_9), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, 0.0009), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, %add_5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_5, %mul_6), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_11, 2), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%div_1, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1), kwargs = {})
triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1 = async_compile.triton('triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 27, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (6*x1) + (36*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + (6*x1) + (36*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (2 + x0 + (6*x1) + (36*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (6 + x0 + (6*x1) + (36*x2)), xmask)
tmp7 = tl.load(in_ptr0 + (7 + x0 + (6*x1) + (36*x2)), xmask)
tmp9 = tl.load(in_ptr0 + (8 + x0 + (6*x1) + (36*x2)), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x0 + (6*x1) + (36*x2)), xmask)
tmp13 = tl.load(in_ptr0 + (13 + x0 + (6*x1) + (36*x2)), xmask)
tmp15 = tl.load(in_ptr0 + (14 + x0 + (6*x1) + (36*x2)), xmask)
tmp19 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + x0))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask)
tmp22 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-2) + x0))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask)
tmp24 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + x1))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + x0))) + ((-4)*(tl_math.abs((-3) + x1))) + (16*x2)), xmask)
tmp28 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-2) + x0))) + ((-4)*(tl_math.abs((-3) + x1))) + (16*x2)), xmask)
tmp30 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-2) + x1))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + x0))) + ((-4)*(tl_math.abs((-2) + x1))) + (16*x2)), xmask)
tmp34 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-2) + x0))) + ((-4)*(tl_math.abs((-2) + x1))) + (16*x2)), xmask)
tmp55 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-3) + x0))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask)
tmp58 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-2) + x0))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask)
tmp60 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + x1))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp62 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-3) + x0))) + ((-4)*(tl_math.abs((-3) + x1))) + (16*x2)), xmask)
tmp64 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-2) + x0))) + ((-4)*(tl_math.abs((-3) + x1))) + (16*x2)), xmask)
tmp66 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-2) + x1))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp68 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-3) + x0))) + ((-4)*(tl_math.abs((-2) + x1))) + (16*x2)), xmask)
tmp70 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-2) + x0))) + ((-4)*(tl_math.abs((-2) + x1))) + (16*x2)), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp17 = 0.1111111111111111
tmp18 = tmp16 * tmp17
tmp21 = tmp20 + tmp19
tmp23 = tmp22 + tmp21
tmp25 = tmp24 + tmp23
tmp27 = tmp26 + tmp25
tmp29 = tmp28 + tmp27
tmp31 = tmp30 + tmp29
tmp33 = tmp32 + tmp31
tmp35 = tmp34 + tmp33
tmp36 = tmp35 * tmp17
tmp37 = tmp19 * tmp19
tmp38 = tmp20 * tmp20
tmp39 = tmp38 + tmp37
tmp40 = tmp22 * tmp22
tmp41 = tmp40 + tmp39
tmp42 = tmp24 * tmp24
tmp43 = tmp42 + tmp41
tmp44 = tmp26 * tmp26
tmp45 = tmp44 + tmp43
tmp46 = tmp28 * tmp28
tmp47 = tmp46 + tmp45
tmp48 = tmp30 * tmp30
tmp49 = tmp48 + tmp47
tmp50 = tmp32 * tmp32
tmp51 = tmp50 + tmp49
tmp52 = tmp34 * tmp34
tmp53 = tmp52 + tmp51
tmp54 = tmp53 * tmp17
tmp57 = tmp56 + tmp55
tmp59 = tmp58 + tmp57
tmp61 = tmp60 + tmp59
tmp63 = tmp62 + tmp61
tmp65 = tmp64 + tmp63
tmp67 = tmp66 + tmp65
tmp69 = tmp68 + tmp67
tmp71 = tmp70 + tmp69
tmp72 = tmp71 * tmp17
tmp73 = tmp55 * tmp55
tmp74 = tmp56 * tmp56
tmp75 = tmp74 + tmp73
tmp76 = tmp58 * tmp58
tmp77 = tmp76 + tmp75
tmp78 = tmp60 * tmp60
tmp79 = tmp78 + tmp77
tmp80 = tmp62 * tmp62
tmp81 = tmp80 + tmp79
tmp82 = tmp64 * tmp64
tmp83 = tmp82 + tmp81
tmp84 = tmp66 * tmp66
tmp85 = tmp84 + tmp83
tmp86 = tmp68 * tmp68
tmp87 = tmp86 + tmp85
tmp88 = tmp70 * tmp70
tmp89 = tmp88 + tmp87
tmp90 = tmp89 * tmp17
tmp91 = 2.0
tmp92 = tmp36 * tmp91
tmp93 = tmp92 * tmp72
tmp94 = 0.0001
tmp95 = tmp93 + tmp94
tmp96 = tmp36 * tmp72
tmp97 = tmp18 - tmp96
tmp98 = tmp97 * tmp91
tmp99 = 0.0009
tmp100 = tmp98 + tmp99
tmp101 = tmp95 * tmp100
tmp102 = tmp36 * tmp36
tmp103 = tmp72 * tmp72
tmp104 = tmp102 + tmp103
tmp105 = tmp104 + tmp94
tmp106 = tmp54 - tmp102
tmp107 = tmp90 - tmp103
tmp108 = tmp106 + tmp107
tmp109 = tmp108 + tmp99
tmp110 = tmp105 * tmp109
tmp111 = tmp101 / tmp110
tmp112 = 1.0
tmp113 = tmp112 - tmp111
tmp114 = 0.5
tmp115 = tmp113 * tmp114
tmp116 = 0.0
tmp117 = triton_helpers.maximum(tmp115, tmp116)
tmp118 = triton_helpers.minimum(tmp117, tmp112)
tl.store(in_out_ptr0 + (x3), tmp118, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, y, mul], Original ATen: [aten.reflection_pad2d, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_reflection_pad2d_0.run(arg0_1, arg1_1, buf2, 576, grid=grid(576), stream=stream0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = buf0; del buf0 # reuse
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [x, mu_x, mul_2, y, mu_y, mul_3, add, mul, avg_pool2d_4, mul_1, sigma_xy, mul_4, add_1, SSIM_n, pow_5, pow_6, add_2, add_3, pow_1, avg_pool2d_2, pow_2, sigma_x, pow_3, avg_pool2d_3, pow_4, sigma_y, add_4, add_5, SSIM_d, truediv, sub_3, truediv_1, clamp], Original ATen: [aten.reflection_pad2d, aten.avg_pool2d, aten.mul, aten.add, aten.sub, aten.pow, aten.div, aten.rsub, aten.clamp]
triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1.run(buf7, buf2, arg0_1, arg1_1, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
del buf2
return (buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SSIM(nn.Module):
"""Layer to compute the SSIM loss between a pair of images
"""
def __init__(self):
super(SSIM, self).__init__()
self.mu_x_pool = nn.AvgPool2d(3, 1)
self.mu_y_pool = nn.AvgPool2d(3, 1)
self.sig_x_pool = nn.AvgPool2d(3, 1)
self.sig_y_pool = nn.AvgPool2d(3, 1)
self.sig_xy_pool = nn.AvgPool2d(3, 1)
self.refl = nn.ReflectionPad2d(1)
self.C1 = 0.01 ** 2
self.C2 = 0.03 ** 2
def forward(self, x, y):
x = self.refl(x)
y = self.refl(y)
mu_x = self.mu_x_pool(x)
mu_y = self.mu_y_pool(y)
sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2
sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2
sigma_xy = self.sig_xy_pool(x * y) - mu_x * mu_y
SSIM_n = (2 * mu_x * mu_y + self.C1) * (2 * sigma_xy + self.C2)
SSIM_d = (mu_x ** 2 + mu_y ** 2 + self.C1) * (sigma_x + sigma_y +
self.C2)
return torch.clamp((1 - SSIM_n / SSIM_d) / 2, 0, 1)
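# For reference (added comment restating the code above): with 3x3 local
# means this is the standard SSIM index
#   SSIM = ((2*mu_x*mu_y + C1) * (2*sigma_xy + C2))
#          / ((mu_x**2 + mu_y**2 + C1) * (sigma_x + sigma_y + C2))
# returned as the dissimilarity clamp((1 - SSIM) / 2, 0, 1): 0 for identical
# patches, approaching 1 for strongly anti-correlated ones.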
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_reflection_pad2d_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 6 * x1 + 36 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + 6 * x1 + 36 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (2 + x0 + 6 * x1 + 36 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (6 + x0 + 6 * x1 + 36 * x2), xmask)
tmp7 = tl.load(in_ptr0 + (7 + x0 + 6 * x1 + 36 * x2), xmask)
tmp9 = tl.load(in_ptr0 + (8 + x0 + 6 * x1 + 36 * x2), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x0 + 6 * x1 + 36 * x2), xmask)
tmp13 = tl.load(in_ptr0 + (13 + x0 + 6 * x1 + 36 * x2), xmask)
tmp15 = tl.load(in_ptr0 + (14 + x0 + 6 * x1 + 36 * x2), xmask)
tmp19 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask)
tmp22 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask)
tmp24 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask, eviction_policy
='evict_last')
tmp26 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-3 + x1) + 16 * x2), xmask)
tmp28 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-3 + x1) + 16 * x2), xmask)
tmp30 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask, eviction_policy
='evict_last')
tmp32 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-2 + x1) + 16 * x2), xmask)
tmp34 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-2 + x1) + 16 * x2), xmask)
tmp55 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask)
tmp58 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask)
tmp60 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask, eviction_policy
='evict_last')
tmp62 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-3 + x1) + 16 * x2), xmask)
tmp64 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-3 + x1) + 16 * x2), xmask)
tmp66 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask, eviction_policy
='evict_last')
tmp68 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
tl_math.abs(-2 + x1) + 16 * x2), xmask)
tmp70 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
tl_math.abs(-2 + x1) + 16 * x2), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp17 = 0.1111111111111111
tmp18 = tmp16 * tmp17
tmp21 = tmp20 + tmp19
tmp23 = tmp22 + tmp21
tmp25 = tmp24 + tmp23
tmp27 = tmp26 + tmp25
tmp29 = tmp28 + tmp27
tmp31 = tmp30 + tmp29
tmp33 = tmp32 + tmp31
tmp35 = tmp34 + tmp33
tmp36 = tmp35 * tmp17
tmp37 = tmp19 * tmp19
tmp38 = tmp20 * tmp20
tmp39 = tmp38 + tmp37
tmp40 = tmp22 * tmp22
tmp41 = tmp40 + tmp39
tmp42 = tmp24 * tmp24
tmp43 = tmp42 + tmp41
tmp44 = tmp26 * tmp26
tmp45 = tmp44 + tmp43
tmp46 = tmp28 * tmp28
tmp47 = tmp46 + tmp45
tmp48 = tmp30 * tmp30
tmp49 = tmp48 + tmp47
tmp50 = tmp32 * tmp32
tmp51 = tmp50 + tmp49
tmp52 = tmp34 * tmp34
tmp53 = tmp52 + tmp51
tmp54 = tmp53 * tmp17
tmp57 = tmp56 + tmp55
tmp59 = tmp58 + tmp57
tmp61 = tmp60 + tmp59
tmp63 = tmp62 + tmp61
tmp65 = tmp64 + tmp63
tmp67 = tmp66 + tmp65
tmp69 = tmp68 + tmp67
tmp71 = tmp70 + tmp69
tmp72 = tmp71 * tmp17
tmp73 = tmp55 * tmp55
tmp74 = tmp56 * tmp56
tmp75 = tmp74 + tmp73
tmp76 = tmp58 * tmp58
tmp77 = tmp76 + tmp75
tmp78 = tmp60 * tmp60
tmp79 = tmp78 + tmp77
tmp80 = tmp62 * tmp62
tmp81 = tmp80 + tmp79
tmp82 = tmp64 * tmp64
tmp83 = tmp82 + tmp81
tmp84 = tmp66 * tmp66
tmp85 = tmp84 + tmp83
tmp86 = tmp68 * tmp68
tmp87 = tmp86 + tmp85
tmp88 = tmp70 * tmp70
tmp89 = tmp88 + tmp87
tmp90 = tmp89 * tmp17
tmp91 = 2.0
tmp92 = tmp36 * tmp91
tmp93 = tmp92 * tmp72
tmp94 = 0.0001
tmp95 = tmp93 + tmp94
tmp96 = tmp36 * tmp72
tmp97 = tmp18 - tmp96
tmp98 = tmp97 * tmp91
tmp99 = 0.0009
tmp100 = tmp98 + tmp99
tmp101 = tmp95 * tmp100
tmp102 = tmp36 * tmp36
tmp103 = tmp72 * tmp72
tmp104 = tmp102 + tmp103
tmp105 = tmp104 + tmp94
tmp106 = tmp54 - tmp102
tmp107 = tmp90 - tmp103
tmp108 = tmp106 + tmp107
tmp109 = tmp108 + tmp99
tmp110 = tmp105 * tmp109
tmp111 = tmp101 / tmp110
tmp112 = 1.0
tmp113 = tmp112 - tmp111
tmp114 = 0.5
tmp115 = tmp113 * tmp114
tmp116 = 0.0
tmp117 = triton_helpers.maximum(tmp115, tmp116)
tmp118 = triton_helpers.minimum(tmp117, tmp112)
tl.store(in_out_ptr0 + x3, tmp118, xmask)
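# Added summary of the fused kernel above: tmp17 = 1/9 is the 3x3 average-pool
# weight; tmp18 = E[x*y] (from the precomputed product in in_ptr0), tmp36 and
# tmp72 are mu_x and mu_y, and tmp54 and tmp90 are E[x^2] and E[y^2]. The tail
# assembles SSIM_n / SSIM_d with C1 = 1e-4 and C2 = 9e-4 and clamps
# (1 - SSIM) / 2 into [0, 1], all without materializing the intermediates.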
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_reflection_pad2d_0[grid(576)](arg0_1, arg1_1,
buf2, 576, XBLOCK=128, num_warps=4, num_stages=1)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = buf0
del buf0
buf7 = buf6
del buf6
triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1[
grid(256)](buf7, buf2, arg0_1, arg1_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del buf2
return buf7,
class SSIMNew(nn.Module):
"""Layer to compute the SSIM loss between a pair of images
"""
def __init__(self):
super(SSIMNew, self).__init__()
self.mu_x_pool = nn.AvgPool2d(3, 1)
self.mu_y_pool = nn.AvgPool2d(3, 1)
self.sig_x_pool = nn.AvgPool2d(3, 1)
self.sig_y_pool = nn.AvgPool2d(3, 1)
self.sig_xy_pool = nn.AvgPool2d(3, 1)
self.refl = nn.ReflectionPad2d(1)
self.C1 = 0.01 ** 2
self.C2 = 0.03 ** 2
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
 | minjabenho/image2pcl | SSIM | false | 7,242 | ["Apache-2.0"] | 1 | 7e696ee48edae30814d32f32e605ad6cf8bf702c | https://github.com/minjabenho/image2pcl/tree/7e696ee48edae30814d32f32e605ad6cf8bf702c | import torch
import torch.nn as nn
class Model(nn.Module):
"""Layer to compute the SSIM loss between a pair of images
"""
def __init__(self):
super().__init__()
self.mu_x_pool = nn.AvgPool2d(3, 1)
self.mu_y_pool = nn.AvgPool2d(3, 1)
self.sig_x_pool = nn.AvgPool2d(3, 1)
self.sig_y_pool = nn.AvgPool2d(3, 1)
self.sig_xy_pool = nn.AvgPool2d(3, 1)
self.refl = nn.ReflectionPad2d(1)
self.C1 = 0.01 ** 2
self.C2 = 0.03 ** 2
def forward(self, x, y):
x = self.refl(x)
y = self.refl(y)
mu_x = self.mu_x_pool(x)
mu_y = self.mu_y_pool(y)
sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2
sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2
sigma_xy = self.sig_xy_pool(x * y) - mu_x * mu_y
SSIM_n = (2 * mu_x * mu_y + self.C1) * (2 * sigma_xy + self.C2)
SSIM_d = (mu_x ** 2 + mu_y ** 2 + self.C1) * (sigma_x + sigma_y +
self.C2)
return torch.clamp((1 - SSIM_n / SSIM_d) / 2, 0, 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
dream_loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/og/cogpl4jkz54u3lvipd4h76h5lkcrid5pisie2s5mzdsizeuim3pb.py
# Topologically Sorted Source Nodes: [sub, diff], Original ATen: [aten.sub, aten.sum]
# Source node to ATen node mapping:
# diff => sum_1
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub,), kwargs = {})
triton_per_fused_sub_sum_0 = async_compile.triton('triton_per_fused_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp5, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [sub, diff], Original ATen: [aten.sub, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_sub_sum_0.run(arg0_1, arg1_1, buf0, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class dream_loss(torch.nn.Module):
def __init__(self):
super(dream_loss, self).__init__()
def forward(self, yhat, y):
diff = torch.sum(yhat - y)
return diff
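# Added caution (worked example, not from the repo): torch.sum(yhat - y) is a
# signed sum, so errors of opposite sign cancel. E.g. yhat = [2., 0.] and
# y = [1., 1.] give diff = 0 even though no element matches; an L1 loss
# would use (yhat - y).abs() instead.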
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp5, None)
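# Added note: this is a persistent reduction -- a single program instance
# (XBLOCK = 1) loads all 256 elements of both 4x4x4x4 inputs in one
# RBLOCK = 256 tile, subtracts elementwise, and reduces with one tl.sum,
# so no masks or multi-stage accumulation are required.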
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_sub_sum_0[grid(1)](arg0_1, arg1_1, buf0, 1, 256,
num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class dream_lossNew(torch.nn.Module):
def __init__(self):
super(dream_lossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
 | mkelcb/knet | dream_loss | false | 7,244 | ["MIT"] | 1 | f0e75f526c8bcdc6969052328b2b1b9cd6767cd8 | https://github.com/mkelcb/knet/tree/f0e75f526c8bcdc6969052328b2b1b9cd6767cd8 | import torch
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, yhat, y):
diff = torch.sum(yhat - y)
return diff
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
BertSelfAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/x2/cx2hdvwyo7m5jvhhvtugzxqvmy6z4nsfhkkjhvgzbbm3cb6dsum2.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {})
# %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ek/cekc4xnuyislvdovnzf5y3lkc2xvyqm5n6o243mths7wzeuvqbod.py
# Topologically Sorted Source Nodes: [sub, attention_mask], Original ATen: [aten.rsub, aten.mul]
# Source node to ATen node mapping:
# attention_mask => mul
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %unsqueeze), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, -10000.0), kwargs = {})
# %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %mul), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
# %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {})
# %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%add_tensor, -inf), kwargs = {})
# %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {})
# %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {})
triton_poi_fused_mul_rsub_1 = async_compile.triton('triton_poi_fused_mul_rsub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_rsub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_rsub_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (4*x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = -10000.0
tmp5 = tmp3 * tmp4
tmp6 = tmp0 + tmp5
tmp9 = tmp2 - tmp8
tmp10 = tmp9 * tmp4
tmp11 = tmp7 + tmp10
tmp12 = triton_helpers.maximum(tmp6, tmp11)
tmp15 = tmp2 - tmp14
tmp16 = tmp15 * tmp4
tmp17 = tmp13 + tmp16
tmp18 = triton_helpers.maximum(tmp12, tmp17)
tmp21 = tmp2 - tmp20
tmp22 = tmp21 * tmp4
tmp23 = tmp19 + tmp22
tmp24 = triton_helpers.maximum(tmp18, tmp23)
tmp25 = tmp6 - tmp24
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp11 - tmp24
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp26 + tmp28
tmp30 = tmp17 - tmp24
tmp31 = tl_math.exp(tmp30)
tmp32 = tmp29 + tmp31
tmp33 = tmp23 - tmp24
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp32 + tmp34
tmp36 = float("-inf")
tmp37 = tmp6 == tmp36
tmp38 = tmp37 == 0
tmp39 = tmp38.to(tl.int64)
tmp40 = (tmp39 != 0)
tmp41 = tmp11 == tmp36
tmp42 = tmp41 == 0
tmp43 = tmp42.to(tl.int64)
tmp44 = (tmp43 != 0)
tmp45 = tmp40 | tmp44
tmp46 = tmp17 == tmp36
tmp47 = tmp46 == 0
tmp48 = tmp47.to(tl.int64)
tmp49 = (tmp48 != 0)
tmp50 = tmp45 | tmp49
tmp51 = tmp23 == tmp36
tmp52 = tmp51 == 0
tmp53 = tmp52.to(tl.int64)
tmp54 = (tmp53 != 0)
tmp55 = tmp50 | tmp54
tl.store(out_ptr0 + (x3), tmp24, xmask)
tl.store(out_ptr1 + (x3), tmp35, xmask)
tl.store(out_ptr2 + (x3), tmp55, xmask)
''', device_str='cuda')
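# Added note: the kernel above walks the four logits of each softmax row once,
# applying the additive mask (1 - m) * -10000, and emits the row maximum
# (tmp24), the sum of exp(logit - max) (tmp35), and a flag (tmp55) recording
# whether any logit in the row is not -inf -- the three ingredients of the
# numerically stable masked softmax consumed by the next kernel.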
# kernel path: runs/run_shard_4/inductor_cache/rs/crssvp4cfqnmhgd7rc7jzgyvj2wsdpbpk6qivlfh3twgsgwopsiy.py
# Topologically Sorted Source Nodes: [sub, attention_mask], Original ATen: [aten.rsub, aten.mul]
# Source node to ATen node mapping:
# attention_mask => mul
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %unsqueeze), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, -10000.0), kwargs = {})
# %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %mul), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {})
# %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {})
triton_poi_fused_mul_rsub_2 = async_compile.triton('triton_poi_fused_mul_rsub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_rsub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_rsub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = (xindex // 4)
x5 = xindex
x3 = (xindex // 64)
x6 = xindex % 16
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last').to(tl.int1)
tmp2 = tl.load(in_out_ptr0 + (x5), xmask)
tmp3 = tl.load(in_ptr1 + (x6 + (16*x3)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last')
tmp1 = tmp0 == 0
tmp4 = 1.0
tmp5 = tmp4 - tmp3
tmp6 = -10000.0
tmp7 = tmp5 * tmp6
tmp8 = tmp2 + tmp7
tmp10 = tmp8 - tmp9
tmp11 = tl_math.exp(tmp10)
tmp13 = tmp11 / tmp12
tmp14 = 0.0
tmp15 = tl.where(tmp1, tmp14, tmp13)
tl.store(in_out_ptr0 + (x5), tmp15, xmask)
''', device_str='cuda')
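# Descriptive note: this fused kernel finishes the numerically stable masked
# softmax over the last dimension of the attention scores. Per element it
# re-applies the additive mask (score + (1 - mask) * -10000.0), subtracts the
# precomputed row maximum, exponentiates, and divides by the precomputed row
# sum of exponentials; rows that were fully masked (every entry -inf, flagged
# by the boolean input) are written as zeros instead.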
# kernel path: runs/run_shard_4/inductor_cache/vv/cvvnhithjvmvhfjufxwwzclfobkrgbyyteg66hp24r675f7elw4c.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
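# Descriptive note: this kernel adds the value-projection bias and writes the
# result directly in the (batch, head, seq, head_dim) layout consumed by the
# second bmm -- the fused counterpart of transpose_for_scores applied to
# mixed_value_layer.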
# kernel path: runs/run_shard_4/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# context_layer_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
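# Descriptive note: a pure layout-change copy. It materializes
# context_layer.permute(0, 2, 1, 3).contiguous(), moving the head dimension
# back next to the feature dimension before the final reshape to (N, Lq, D).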
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_7, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf2)
del primals_8
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(buf0, primals_3, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(buf1, primals_6, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_6
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool)
# Topologically Sorted Source Nodes: [sub, attention_mask], Original ATen: [aten.rsub, aten.mul]
triton_poi_fused_mul_rsub_1.run(buf5, primals_1, buf6, buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [sub, attention_mask], Original ATen: [aten.rsub, aten.mul]
triton_poi_fused_mul_rsub_2.run(buf9, buf8, primals_1, buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf8
del primals_1
buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(buf2, primals_9, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_9
buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0)
del buf11
return (reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (16, 4), (4, 1), 0), buf9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), )
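    # Note: only the first returned tensor above is the module output (the
    # context layer); the remaining reinterpreted tensors are activations that
    # Inductor keeps alive for the backward graph of this forward.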
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, query_states, key_states, value_states, attention_mask):
"""
Args:
query_states: (N, Lq, D)
key_states: (N, L, D)
value_states: (N, L, D)
attention_mask: (N, Lq, L)
        Returns:
            context_layer: (N, Lq, D)
"""
attention_mask = (1 - attention_mask.unsqueeze(1)) * -10000.0
mixed_query_layer = self.query(query_states)
mixed_key_layer = self.key(key_states)
mixed_value_layer = self.value(value_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
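# Minimal usage sketch (hypothetical values, mirroring get_inputs /
# get_init_inputs below; dropout is active in train mode):
#   config = _mock_config(hidden_size=4, num_attention_heads=4,
#                         attention_probs_dropout_prob=0.5)
#   attn = BertSelfAttention(config)
#   ctx = attn(torch.rand(4, 4, 4), torch.rand(4, 4, 4),
#              torch.rand(4, 4, 4), torch.ones(4, 4, 4))  # -> (4, 4, 4)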
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=
4, attention_probs_dropout_prob=0.5)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_mul_rsub_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 * x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy
='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (2 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr1 + (3 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = -10000.0
tmp5 = tmp3 * tmp4
tmp6 = tmp0 + tmp5
tmp9 = tmp2 - tmp8
tmp10 = tmp9 * tmp4
tmp11 = tmp7 + tmp10
tmp12 = triton_helpers.maximum(tmp6, tmp11)
tmp15 = tmp2 - tmp14
tmp16 = tmp15 * tmp4
tmp17 = tmp13 + tmp16
tmp18 = triton_helpers.maximum(tmp12, tmp17)
tmp21 = tmp2 - tmp20
tmp22 = tmp21 * tmp4
tmp23 = tmp19 + tmp22
tmp24 = triton_helpers.maximum(tmp18, tmp23)
tmp25 = tmp6 - tmp24
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp11 - tmp24
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp26 + tmp28
tmp30 = tmp17 - tmp24
tmp31 = tl_math.exp(tmp30)
tmp32 = tmp29 + tmp31
tmp33 = tmp23 - tmp24
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp32 + tmp34
tmp36 = float('-inf')
tmp37 = tmp6 == tmp36
tmp38 = tmp37 == 0
tmp39 = tmp38.to(tl.int64)
tmp40 = tmp39 != 0
tmp41 = tmp11 == tmp36
tmp42 = tmp41 == 0
tmp43 = tmp42.to(tl.int64)
tmp44 = tmp43 != 0
tmp45 = tmp40 | tmp44
tmp46 = tmp17 == tmp36
tmp47 = tmp46 == 0
tmp48 = tmp47.to(tl.int64)
tmp49 = tmp48 != 0
tmp50 = tmp45 | tmp49
tmp51 = tmp23 == tmp36
tmp52 = tmp51 == 0
tmp53 = tmp52.to(tl.int64)
tmp54 = tmp53 != 0
tmp55 = tmp50 | tmp54
tl.store(out_ptr0 + x3, tmp24, xmask)
tl.store(out_ptr1 + x3, tmp35, xmask)
tl.store(out_ptr2 + x3, tmp55, xmask)
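# Descriptive note: for each softmax row of the masked attention scores this
# kernel emits three per-row reductions -- the maximum (out_ptr0), the sum of
# exp(score - max) (out_ptr1), and a boolean flag (out_ptr2) that is True when
# at least one entry in the row is finite (i.e. the row is not fully masked).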
@triton.jit
def triton_poi_fused_mul_rsub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex // 4
x5 = xindex
x3 = xindex // 64
x6 = xindex % 16
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp2 = tl.load(in_out_ptr0 + x5, xmask)
tmp3 = tl.load(in_ptr1 + (x6 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp1 = tmp0 == 0
tmp4 = 1.0
tmp5 = tmp4 - tmp3
tmp6 = -10000.0
tmp7 = tmp5 * tmp6
tmp8 = tmp2 + tmp7
tmp10 = tmp8 - tmp9
tmp11 = tl_math.exp(tmp10)
tmp13 = tmp11 / tmp12
tmp14 = 0.0
tmp15 = tl.where(tmp1, tmp14, tmp13)
tl.store(in_out_ptr0 + x5, tmp15, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_7, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_10, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf2)
del primals_8
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 4)](buf1, primals_6, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_6
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf1
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool)
triton_poi_fused_mul_rsub_1[grid(64)](buf5, primals_1, buf6, buf7,
buf8, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_mul_rsub_2[grid(256)](buf9, buf8, primals_1, buf6,
buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf8
del primals_1
buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf7
triton_poi_fused_3[grid(16, 4)](buf2, primals_9, buf10, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_9
buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf6
triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf11
return reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_7, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_10, (16, 4), (4, 1), 0
), buf9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
class BertSelfAttentionNew(nn.Module):
def __init__(self, config):
super(BertSelfAttentionNew, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, input_0, input_1, input_2, input_3):
primals_2 = self.query.weight
primals_3 = self.query.bias
primals_5 = self.key.weight
primals_6 = self.key.bias
primals_8 = self.value.weight
primals_9 = self.value.bias
primals_1 = input_0
primals_4 = input_1
primals_7 = input_2
primals_10 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
| minjoong507/Image-Captioning-Transformer | BertSelfAttention | false | 7,247 | [
"MIT"
] | 1 | 813060f0bb656e336154173f11e99a80362c8c2a | https://github.com/minjoong507/Image-Captioning-Transformer/tree/813060f0bb656e336154173f11e99a80362c8c2a | from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, query_states, key_states, value_states, attention_mask):
"""
Args:
query_states: (N, Lq, D)
key_states: (N, L, D)
value_states: (N, L, D)
attention_mask: (N, Lq, L)
        Returns:
            context_layer: (N, Lq, D)
"""
attention_mask = (1 - attention_mask.unsqueeze(1)) * -10000.0
mixed_query_layer = self.query(query_states)
mixed_key_layer = self.key(key_states)
mixed_value_layer = self.value(value_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=
4, attention_probs_dropout_prob=0.5)}]
|
BertLMPredictionHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/k6/ck6o2ucwdqtvjyw7bruyzgade2k6iruvl53t2wmqy2xkgypurpgf.py
# Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_1, u, sub, pow_1, s], Original ATen: [aten.mul, aten.div, aten.erf, aten.add, aten.mean, aten.sub, aten.pow]
# Source node to ATen node mapping:
# add => add
# erf => erf
# hidden_states_1 => mul_1
# mul => mul
# pow_1 => pow_1
# s => mean_1
# sub => sub
# truediv => div
# u => mean
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, 1.4142135623730951), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul_1, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %mean), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
triton_poi_fused_add_div_erf_mean_mul_pow_sub_0 = async_compile.triton('triton_poi_fused_add_div_erf_mean_mul_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_mean_mul_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_erf_mean_mul_pow_sub_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tmp10 = tmp9 * tmp1
tmp11 = tmp9 * tmp3
tmp12 = libdevice.erf(tmp11)
tmp13 = tmp12 + tmp6
tmp14 = tmp10 * tmp13
tmp15 = tmp8 + tmp14
tmp17 = tmp16 * tmp1
tmp18 = tmp16 * tmp3
tmp19 = libdevice.erf(tmp18)
tmp20 = tmp19 + tmp6
tmp21 = tmp17 * tmp20
tmp22 = tmp15 + tmp21
tmp24 = tmp23 * tmp1
tmp25 = tmp23 * tmp3
tmp26 = libdevice.erf(tmp25)
tmp27 = tmp26 + tmp6
tmp28 = tmp24 * tmp27
tmp29 = tmp22 + tmp28
tmp30 = 4.0
tmp31 = tmp29 / tmp30
tmp32 = tmp8 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tmp14 - tmp31
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp21 - tmp31
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp28 - tmp31
tmp41 = tmp40 * tmp40
tmp42 = tmp39 + tmp41
tmp43 = tmp42 / tmp30
tl.store(out_ptr0 + (x0), tmp31, xmask)
tl.store(out_ptr1 + (x0), tmp43, xmask)
''', device_str='cuda')
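# Descriptive note: this kernel fuses the exact (erf-based) GELU of the dense
# output with the LayerNorm statistics: for each group of 4 values along the
# last dimension it computes the mean u (out_ptr0) and the biased variance
# s = mean((g - u)^2) (out_ptr1) of the GELU-activated values.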
# kernel path: runs/run_shard_4/inductor_cache/ew/cewcb66a7hyf2vxy6evimdhxxg6p7casfhukvhbgdoijgab2kyck.py
# Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_1, sub, add_1, sqrt, x, mul_2, hidden_states_2], Original ATen: [aten.mul, aten.div, aten.erf, aten.add, aten.sub, aten.sqrt]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# erf => erf
# hidden_states_1 => mul_1
# hidden_states_2 => add_2
# mul => mul
# mul_2 => mul_2
# sqrt => sqrt
# sub => sub
# truediv => div
# x => div_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, 1.4142135623730951), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %mean), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %div_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_5), kwargs = {})
triton_poi_fused_add_div_erf_mul_sqrt_sub_1 = async_compile.triton('triton_poi_fused_add_div_erf_mul_sqrt_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_mul_sqrt_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_erf_mul_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp10 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = 0.7071067811865475
tmp5 = tmp1 * tmp4
tmp6 = libdevice.erf(tmp5)
tmp7 = 1.0
tmp8 = tmp6 + tmp7
tmp9 = tmp3 * tmp8
tmp11 = tmp9 - tmp10
tmp13 = tmp12 + tmp7
tmp14 = libdevice.sqrt(tmp13)
tmp15 = tmp11 / tmp14
tmp16 = tmp0 * tmp15
tmp18 = tmp16 + tmp17
tl.store(out_ptr0 + (x2), tmp18, xmask)
''', device_str='cuda')
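# Descriptive note: this kernel recomputes the GELU value per element, then
# normalizes with the precomputed statistics, (g - u) / sqrt(s + 1.0), and
# applies the LayerNorm affine transform weight * x_hat + bias. The literal
# 1.0 added under the square root is the configured layer_norm_eps (set to 1
# by get_init_inputs below), baked into the kernel as a constant.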
# kernel path: runs/run_shard_4/inductor_cache/f2/cf2qmkaxlnr752lokw5qdyvpamejxsvuizodac6qtjtl7yt3h2kr.py
# Topologically Sorted Source Nodes: [hidden_states_3, hidden_states_4], Original ATen: [aten.add, aten._softmax]
# Source node to ATen node mapping:
# hidden_states_3 => add_3
# hidden_states_4 => amax, exp, sub_2, sum_1
# Graph fragment:
# %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_7), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_3, [1], True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
triton_poi_fused__softmax_add_2 = async_compile.triton('triton_poi_fused__softmax_add_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + (64*x2)), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x3 + (64*x2)), xmask)
tmp6 = tl.load(in_ptr0 + (32 + x3 + (64*x2)), xmask)
tmp9 = tl.load(in_ptr0 + (48 + x3 + (64*x2)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp1
tmp5 = triton_helpers.maximum(tmp2, tmp4)
tmp7 = tmp6 + tmp1
tmp8 = triton_helpers.maximum(tmp5, tmp7)
tmp10 = tmp9 + tmp1
tmp11 = triton_helpers.maximum(tmp8, tmp10)
tmp12 = tmp2 - tmp11
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp4 - tmp11
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp17 = tmp7 - tmp11
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp10 - tmp11
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tl.store(out_ptr0 + (x4), tmp11, xmask)
tl.store(out_ptr1 + (x4), tmp22, xmask)
''', device_str='cuda')
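# Descriptive note: the reduction half of the softmax over dim=1 of the
# (4, 4, 4, 4) logits-plus-bias tensor -- the stride-16 loads walk that
# dimension, producing the per-row maximum (out_ptr0) and the sum of
# exp(logit - max) (out_ptr1).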
# kernel path: runs/run_shard_4/inductor_cache/by/cby4o5k6splfl4jfpqk2vnklsyi6x32msi4s5nrkzdhs47djvavl.py
# Topologically Sorted Source Nodes: [hidden_states_3, hidden_states_4], Original ATen: [aten.add, aten._softmax]
# Source node to ATen node mapping:
# hidden_states_3 => add_3
# hidden_states_4 => amax, div_2, exp, sub_2, sum_1
# Graph fragment:
# %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_7), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_3, [1], True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_add_3 = async_compile.triton('triton_poi_fused__softmax_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x3 = (xindex // 64)
x5 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x5 + (16*x3)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (x5 + (16*x3)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(in_out_ptr0 + (x4), tmp7, xmask)
''', device_str='cuda')
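# Descriptive note: the elementwise half of the same softmax, applied in
# place: each logit gets the decoder bias added, the row maximum subtracted,
# is exponentiated, and is divided by the row sum computed by the previous
# kernel.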
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_1, u, sub, pow_1, s], Original ATen: [aten.mul, aten.div, aten.erf, aten.add, aten.mean, aten.sub, aten.pow]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_erf_mean_mul_pow_sub_0.run(buf0, buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_1, sub, add_1, sqrt, x, mul_2, hidden_states_2], Original ATen: [aten.mul, aten.div, aten.erf, aten.add, aten.sub, aten.sqrt]
triton_poi_fused_add_div_erf_mul_sqrt_sub_1.run(primals_4, buf0, buf1, buf2, primals_5, buf3, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf2, (4, 1, 4, 4), (16, 64, 4, 1), 0); del buf2 # reuse
buf6 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 64, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [hidden_states_3, hidden_states_4], Original ATen: [aten.add, aten._softmax]
triton_poi_fused__softmax_add_2.run(buf4, primals_7, buf5, buf6, 64, grid=grid(64), stream=stream0)
buf7 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [hidden_states_3, hidden_states_4], Original ATen: [aten.add, aten._softmax]
triton_poi_fused__softmax_add_3.run(buf7, primals_7, buf5, buf6, 256, grid=grid(256), stream=stream0)
del buf5
del buf6
del primals_7
return (buf7, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf7, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
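# Note: this is the exact erf-based GELU; it matches
# torch.nn.functional.gelu(x) with the default approximate='none'.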
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
        """
        Construct a layernorm module in the TF style (epsilon inside the square root).
        """
        super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = gelu
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
def forward(self, hidden_states):
"""(N, L, D)"""
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size,
bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.softmax = nn.Softmax(dim=1)
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
hidden_states = self.softmax(hidden_states)
return hidden_states
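# Minimal usage sketch (hypothetical values, mirroring get_inputs /
# get_init_inputs below):
#   config = _mock_config(hidden_size=4, layer_norm_eps=1, vocab_size=4)
#   head = BertLMPredictionHead(config)
#   probs = head(torch.rand(4, 4, 4, 4))  # softmax taken over dim=1, as
#                                         # configured in __init__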
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, layer_norm_eps=1,
vocab_size=4)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_erf_mean_mul_pow_sub_0(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tmp10 = tmp9 * tmp1
tmp11 = tmp9 * tmp3
tmp12 = libdevice.erf(tmp11)
tmp13 = tmp12 + tmp6
tmp14 = tmp10 * tmp13
tmp15 = tmp8 + tmp14
tmp17 = tmp16 * tmp1
tmp18 = tmp16 * tmp3
tmp19 = libdevice.erf(tmp18)
tmp20 = tmp19 + tmp6
tmp21 = tmp17 * tmp20
tmp22 = tmp15 + tmp21
tmp24 = tmp23 * tmp1
tmp25 = tmp23 * tmp3
tmp26 = libdevice.erf(tmp25)
tmp27 = tmp26 + tmp6
tmp28 = tmp24 * tmp27
tmp29 = tmp22 + tmp28
tmp30 = 4.0
tmp31 = tmp29 / tmp30
tmp32 = tmp8 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tmp14 - tmp31
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp21 - tmp31
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp28 - tmp31
tmp41 = tmp40 * tmp40
tmp42 = tmp39 + tmp41
tmp43 = tmp42 / tmp30
tl.store(out_ptr0 + x0, tmp31, xmask)
tl.store(out_ptr1 + x0, tmp43, xmask)
@triton.jit
def triton_poi_fused_add_div_erf_mul_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = 0.7071067811865475
tmp5 = tmp1 * tmp4
tmp6 = libdevice.erf(tmp5)
tmp7 = 1.0
tmp8 = tmp6 + tmp7
tmp9 = tmp3 * tmp8
tmp11 = tmp9 - tmp10
tmp13 = tmp12 + tmp7
tmp14 = libdevice.sqrt(tmp13)
tmp15 = tmp11 / tmp14
tmp16 = tmp0 * tmp15
tmp18 = tmp16 + tmp17
tl.store(out_ptr0 + x2, tmp18, xmask)
@triton.jit
def triton_poi_fused__softmax_add_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask)
tmp6 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask)
tmp9 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp3 + tmp1
tmp5 = triton_helpers.maximum(tmp2, tmp4)
tmp7 = tmp6 + tmp1
tmp8 = triton_helpers.maximum(tmp5, tmp7)
tmp10 = tmp9 + tmp1
tmp11 = triton_helpers.maximum(tmp8, tmp10)
tmp12 = tmp2 - tmp11
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp4 - tmp11
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp13 + tmp15
tmp17 = tmp7 - tmp11
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp10 - tmp11
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tl.store(out_ptr0 + x4, tmp11, xmask)
tl.store(out_ptr1 + x4, tmp22, xmask)
@triton.jit
def triton_poi_fused__softmax_add_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x3 = xindex // 64
x5 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x5 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr2 + (x5 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(in_out_ptr0 + x4, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_erf_mean_mul_pow_sub_0[grid(64)](buf0,
buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_erf_mul_sqrt_sub_1[grid(256)](primals_4,
buf0, buf1, buf2, primals_5, buf3, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf2, (4, 1, 4, 4), (16, 64, 4, 1), 0)
del buf2
buf6 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 64, 4, 1), 0)
del buf1
triton_poi_fused__softmax_add_2[grid(64)](buf4, primals_7, buf5,
buf6, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused__softmax_add_3[grid(256)](buf7, primals_7, buf5,
buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf5
del buf6
del primals_7
return buf7, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf7, primals_6
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
        """
        Construct a layernorm module in the TF style (epsilon inside the square root).
        """
        super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = gelu
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
def forward(self, hidden_states):
"""(N, L, D)"""
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHeadNew(nn.Module):
def __init__(self, config):
super(BertLMPredictionHeadNew, self).__init__()
self.transform = BertPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size,
bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.softmax = nn.Softmax(dim=1)
def forward(self, input_0):
primals_2 = self.bias
primals_1 = self.transform.dense.weight
primals_4 = self.transform.dense.bias
primals_5 = self.transform.LayerNorm.weight
primals_7 = self.transform.LayerNorm.bias
primals_6 = self.decoder.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
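# Usage sketch for the compiled head. SimpleNamespace stands in for the real
# BERT config object (an assumption for illustration; only hidden_size,
# layer_norm_eps and vocab_size are read), and the call() path above is
# CUDA-only:
def _example_run_prediction_head():
    from types import SimpleNamespace
    cfg = SimpleNamespace(hidden_size=4, layer_norm_eps=1e-12, vocab_size=4)
    head = BertLMPredictionHeadNew(cfg).cuda()
    return head(torch.rand(4, 4, 4, 4, device='cuda'))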
| minjoong507/Image-Captioning-Transformer | BertLMPredictionHead | false | 7,248 | [
"MIT"
] | 1 | 813060f0bb656e336154173f11e99a80362c8c2a | https://github.com/minjoong507/Image-Captioning-Transformer/tree/813060f0bb656e336154173f11e99a80362c8c2a | from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
class BertLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-12):
        """
        Construct a layernorm module in the TF style (epsilon inside the square root).
        """
        super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = gelu
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
def forward(self, hidden_states):
"""(N, L, D)"""
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class Model(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size,
bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.softmax = nn.Softmax(dim=1)
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
hidden_states = self.softmax(hidden_states)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, layer_norm_eps=1,
vocab_size=4)}]
|
CAT_TokenEmbedding | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/rr/crr4uc5tissxvuw4bzh6fo6y7osbgp6kedo24pzjiyyorxmxyfkc.py
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.copy]
# Source node to ATen node mapping:
# pad => copy
# Graph fragment:
# %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1, %slice_2), kwargs = {})
# %slice_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%empty, %copy, 2, 1, 5), kwargs = {})
# %slice_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default, %slice_7, 2, 0, 1), kwargs = {})
# %slice_scatter_default_2 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_1, %slice_12, 2, 5, 6), kwargs = {})
triton_poi_fused_copy_0 = async_compile.triton('triton_poi_fused_copy_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_copy_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 6
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y0 = yindex
x1 = xindex
tmp0 = y0
tmp1 = tl.full([1, 1], 5, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.broadcast_to((-4) + y0, [XBLOCK, YBLOCK])
tmp4 = tl.full([1, 1], 1, tl.int64)
tmp5 = tmp3 < tmp4
tmp6 = tmp5 & tmp2
tmp7 = tl.broadcast_to(y0, [XBLOCK, YBLOCK])
tmp8 = tmp7 >= tmp4
tmp9 = tmp7 < tmp1
tmp10 = tmp8 & tmp9
tmp11 = tmp10 & tmp6
tmp12 = tl.load(in_ptr0 + ((-4) + x1 + (4*y0)), tmp11 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp13 = float("nan")
tmp14 = tl.where(tmp10, tmp12, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp6, tmp14, tmp15)
tmp17 = tmp3 >= tmp4
tmp18 = tmp3 < tmp1
tmp19 = tmp17 & tmp18
tmp20 = tmp19 & tmp2
tmp21 = tl.load(in_ptr0 + ((-20) + x1 + (4*y0)), tmp20 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp22 = tl.where(tmp19, tmp21, tmp13)
tmp23 = tl.where(tmp5, tmp16, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp2, tmp23, tmp24)
tmp26 = tmp0 < tmp4
tmp27 = tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK])
tmp28 = tmp27 >= tmp4
tmp29 = tmp27 < tmp1
tmp30 = tmp28 & tmp29
tmp31 = tmp30 & tmp26
tmp32 = tl.load(in_ptr0 + (12 + x1 + (4*y0)), tmp31 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp33 = tl.where(tmp30, tmp32, tmp13)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp26, tmp33, tmp34)
tmp36 = tmp0 >= tmp4
tmp37 = tmp0 < tmp1
tmp38 = tmp36 & tmp37
tmp39 = tl.load(in_ptr0 + ((-4) + x1 + (4*y0)), tmp38 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp40 = tl.where(tmp38, tmp39, tmp13)
tmp41 = tl.where(tmp26, tmp35, tmp40)
tmp42 = tl.where(tmp2, tmp25, tmp41)
tl.store(out_ptr0 + (y0 + (6*x1)), tmp42, xmask & ymask)
''', device_str='cuda')
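# The copy kernel above materializes the circular padding implied by
# nn.Conv1d(..., padding=1, padding_mode='circular'): a length-4 signal grows
# to length 6 by wrapping one element from each end, e.g.
# [0, 1, 2, 3] -> [3, 0, 1, 2, 3, 0]. An eager-mode sketch of the same layout
# (illustrative; not part of the generated code):
def _circular_pad_reference(x):
    import torch.nn.functional as F
    return F.pad(x, (1, 1), mode='circular')  # x: (N, C, L) -> (N, C, L + 2)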
# kernel path: runs/run_shard_4/inductor_cache/6v/c6vcpvedkedg5f7dbt5warppc4dneicv65dm4nutaxergetxh3x5.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%slice_scatter_default_2, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 160
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 10
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
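# The second kernel simply folds the bias back in after the bias-less extern
# convolution; an eager-mode equivalent (illustrative):
def _conv_bias_reference(y, bias):
    return y + bias.view(1, -1, 1)  # y: (N, C_out, L), bias: (C_out,)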
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (10, 1, 3), (3, 3, 1))
assert_size_stride(primals_3, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 1, 6), (6, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.copy]
stream0 = get_raw_stream(0)
triton_poi_fused_copy_0.run(primals_1, buf1, 6, 4, grid=grid(6, 4), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 10, 4), (40, 4, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf3, primals_3, 160, grid=grid(160), stream=stream0)
del primals_3
return (reinterpret_tensor(buf3, (10, 4, 4), (4, 1, 40), 0), primals_2, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((10, 1, 3), (3, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CAT_TokenEmbedding(nn.Module):
def __init__(self, c_in=1, d_feature=10):
super(CAT_TokenEmbedding, self).__init__()
padding = 1 if torch.__version__ >= '1.5.0' else 2
self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_feature,
kernel_size=3, padding=padding, padding_mode='circular')
for m in self.modules():
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_in',
nonlinearity='leaky_relu')
def forward(self, x: 'torch.Tensor'):
x = x.unsqueeze(1)
x = x.transpose(0, 2)
x = self.tokenConv(x).permute(1, 2, 0)
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
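# Shape walkthrough for the forward above, using the (4, 4) input from
# get_inputs(). The axis names are my reading of the code, not documented:
def _token_embedding_shapes():
    emb = CAT_TokenEmbedding()
    x = torch.rand(4, 4)            # conv sees x.unsqueeze(1).transpose(0, 2) as (N=4, C=1, L=4)
    out = emb(x)                    # circular padding keeps L=4; permute(1, 2, 0) reorders
    assert out.shape == (10, 4, 4)  # (d_feature, L, N)
    return out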
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_copy_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 6
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y0 = yindex
x1 = xindex
tmp0 = y0
tmp1 = tl.full([1, 1], 5, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK])
tmp4 = tl.full([1, 1], 1, tl.int64)
tmp5 = tmp3 < tmp4
tmp6 = tmp5 & tmp2
tmp7 = tl.broadcast_to(y0, [XBLOCK, YBLOCK])
tmp8 = tmp7 >= tmp4
tmp9 = tmp7 < tmp1
tmp10 = tmp8 & tmp9
tmp11 = tmp10 & tmp6
tmp12 = tl.load(in_ptr0 + (-4 + x1 + 4 * y0), tmp11 & xmask & ymask,
eviction_policy='evict_last', other=0.0)
tmp13 = float('nan')
tmp14 = tl.where(tmp10, tmp12, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp6, tmp14, tmp15)
tmp17 = tmp3 >= tmp4
tmp18 = tmp3 < tmp1
tmp19 = tmp17 & tmp18
tmp20 = tmp19 & tmp2
tmp21 = tl.load(in_ptr0 + (-20 + x1 + 4 * y0), tmp20 & xmask & ymask,
eviction_policy='evict_last', other=0.0)
tmp22 = tl.where(tmp19, tmp21, tmp13)
tmp23 = tl.where(tmp5, tmp16, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp2, tmp23, tmp24)
tmp26 = tmp0 < tmp4
tmp27 = tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK])
tmp28 = tmp27 >= tmp4
tmp29 = tmp27 < tmp1
tmp30 = tmp28 & tmp29
tmp31 = tmp30 & tmp26
tmp32 = tl.load(in_ptr0 + (12 + x1 + 4 * y0), tmp31 & xmask & ymask,
eviction_policy='evict_last', other=0.0)
tmp33 = tl.where(tmp30, tmp32, tmp13)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp26, tmp33, tmp34)
tmp36 = tmp0 >= tmp4
tmp37 = tmp0 < tmp1
tmp38 = tmp36 & tmp37
tmp39 = tl.load(in_ptr0 + (-4 + x1 + 4 * y0), tmp38 & xmask & ymask,
eviction_policy='evict_last', other=0.0)
tmp40 = tl.where(tmp38, tmp39, tmp13)
tmp41 = tl.where(tmp26, tmp35, tmp40)
tmp42 = tl.where(tmp2, tmp25, tmp41)
tl.store(out_ptr0 + (y0 + 6 * x1), tmp42, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 160
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 10
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (10, 1, 3), (3, 3, 1))
assert_size_stride(primals_3, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 1, 6), (6, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_copy_0[grid(6, 4)](primals_1, buf1, 6, 4, XBLOCK=4,
YBLOCK=8, num_warps=1, num_stages=1)
del primals_1
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 10, 4), (40, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(160)](buf3, primals_3, 160,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return reinterpret_tensor(buf3, (10, 4, 4), (4, 1, 40), 0), primals_2, buf1
class CAT_TokenEmbeddingNew(nn.Module):
def __init__(self, c_in=1, d_feature=10):
super(CAT_TokenEmbeddingNew, self).__init__()
padding = 1 if torch.__version__ >= '1.5.0' else 2
self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_feature,
kernel_size=3, padding=padding, padding_mode='circular')
for m in self.modules():
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_in',
nonlinearity='leaky_relu')
def forward(self, input_0):
primals_2 = self.tokenConv.weight
primals_3 = self.tokenConv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| mkmysk123456789/Informer2020 | CAT_TokenEmbedding | false | 7,250 | [
"Apache-2.0"
] | 1 | ad4b895169a17db580aab6d2c09fd07e06c9b6fa | https://github.com/mkmysk123456789/Informer2020/tree/ad4b895169a17db580aab6d2c09fd07e06c9b6fa | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, c_in=1, d_feature=10):
super().__init__()
padding = 1 if torch.__version__ >= '1.5.0' else 2
self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_feature,
kernel_size=3, padding=padding, padding_mode='circular')
for m in self.modules():
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_in',
nonlinearity='leaky_relu')
def forward(self, x: 'torch.Tensor'):
x = x.unsqueeze(1)
x = x.transpose(0, 2)
x = self.tokenConv(x).permute(1, 2, 0)
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
    return [[], {}]
|
BertAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/x2/cx2hdvwyo7m5jvhhvtugzxqvmy6z4nsfhkkjhvgzbbm3cb6dsum2.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {})
# %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ek/cekc4xnuyislvdovnzf5y3lkc2xvyqm5n6o243mths7wzeuvqbod.py
# Topologically Sorted Source Nodes: [sub, attention_mask], Original ATen: [aten.rsub, aten.mul]
# Source node to ATen node mapping:
# attention_mask => mul
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %unsqueeze), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, -10000.0), kwargs = {})
# %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %mul), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
# %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {})
# %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%add_tensor, -inf), kwargs = {})
# %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {})
# %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {})
triton_poi_fused_mul_rsub_1 = async_compile.triton('triton_poi_fused_mul_rsub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_rsub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_rsub_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (4*x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = -10000.0
tmp5 = tmp3 * tmp4
tmp6 = tmp0 + tmp5
tmp9 = tmp2 - tmp8
tmp10 = tmp9 * tmp4
tmp11 = tmp7 + tmp10
tmp12 = triton_helpers.maximum(tmp6, tmp11)
tmp15 = tmp2 - tmp14
tmp16 = tmp15 * tmp4
tmp17 = tmp13 + tmp16
tmp18 = triton_helpers.maximum(tmp12, tmp17)
tmp21 = tmp2 - tmp20
tmp22 = tmp21 * tmp4
tmp23 = tmp19 + tmp22
tmp24 = triton_helpers.maximum(tmp18, tmp23)
tmp25 = tmp6 - tmp24
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp11 - tmp24
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp26 + tmp28
tmp30 = tmp17 - tmp24
tmp31 = tl_math.exp(tmp30)
tmp32 = tmp29 + tmp31
tmp33 = tmp23 - tmp24
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp32 + tmp34
tmp36 = float("-inf")
tmp37 = tmp6 == tmp36
tmp38 = tmp37 == 0
tmp39 = tmp38.to(tl.int64)
tmp40 = (tmp39 != 0)
tmp41 = tmp11 == tmp36
tmp42 = tmp41 == 0
tmp43 = tmp42.to(tl.int64)
tmp44 = (tmp43 != 0)
tmp45 = tmp40 | tmp44
tmp46 = tmp17 == tmp36
tmp47 = tmp46 == 0
tmp48 = tmp47.to(tl.int64)
tmp49 = (tmp48 != 0)
tmp50 = tmp45 | tmp49
tmp51 = tmp23 == tmp36
tmp52 = tmp51 == 0
tmp53 = tmp52.to(tl.int64)
tmp54 = (tmp53 != 0)
tmp55 = tmp50 | tmp54
tl.store(out_ptr0 + (x3), tmp24, xmask)
tl.store(out_ptr1 + (x3), tmp35, xmask)
tl.store(out_ptr2 + (x3), tmp55, xmask)
''', device_str='cuda')
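# Besides the max/sum reductions of a stable softmax, the kernel above also
# records per row whether any score is not -inf; fully masked rows are later
# zeroed instead of producing NaNs. An eager-mode sketch of that guard
# (illustrative; the helper name is mine):
def _masked_softmax_reference(scores):
    probs = torch.softmax(scores, dim=-1)
    any_unmasked = (scores != float('-inf')).any(dim=-1, keepdim=True)
    return torch.where(any_unmasked, probs, torch.zeros_like(probs))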
# kernel path: runs/run_shard_4/inductor_cache/rs/crssvp4cfqnmhgd7rc7jzgyvj2wsdpbpk6qivlfh3twgsgwopsiy.py
# Topologically Sorted Source Nodes: [sub, attention_mask], Original ATen: [aten.rsub, aten.mul]
# Source node to ATen node mapping:
# attention_mask => mul
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %unsqueeze), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, -10000.0), kwargs = {})
# %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %mul), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {})
# %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {})
triton_poi_fused_mul_rsub_2 = async_compile.triton('triton_poi_fused_mul_rsub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_rsub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_rsub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = (xindex // 4)
x5 = xindex
x3 = (xindex // 64)
x6 = xindex % 16
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last').to(tl.int1)
tmp2 = tl.load(in_out_ptr0 + (x5), xmask)
tmp3 = tl.load(in_ptr1 + (x6 + (16*x3)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last')
tmp1 = tmp0 == 0
tmp4 = 1.0
tmp5 = tmp4 - tmp3
tmp6 = -10000.0
tmp7 = tmp5 * tmp6
tmp8 = tmp2 + tmp7
tmp10 = tmp8 - tmp9
tmp11 = tl_math.exp(tmp10)
tmp13 = tmp11 / tmp12
tmp14 = 0.0
tmp15 = tl.where(tmp1, tmp14, tmp13)
tl.store(in_out_ptr0 + (x5), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vv/cvvnhithjvmvhfjufxwwzclfobkrgbyyteg66hp24r675f7elw4c.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# context_layer_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hk/chkirlrxzb52fxbrq2rynamgt7aligt77yn6j6ihfk46whjvd374.py
# Topologically Sorted Source Nodes: [add_1, u, sub_1, pow_1, s], Original ATen: [aten.add, aten.mean, aten.sub, aten.pow]
# Source node to ATen node mapping:
# add_1 => add_1
# pow_1 => pow_1
# s => mean_1
# sub_1 => sub_2
# u => mean
# Graph fragment:
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_4), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_1, [-1], True), kwargs = {})
# %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %mean), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
triton_poi_fused_add_mean_pow_sub_5 = async_compile.triton('triton_poi_fused_add_mean_pow_sub_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_pow_sub_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_pow_sub_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/3j/c3junievyif7korlhvw3ftme6dpzx2uodc43q34sexam3chrqpdb.py
# Topologically Sorted Source Nodes: [add_1, u, sub_1, add_2, sqrt, x_3, mul_1, hidden_states_2], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# hidden_states_2 => add_3
# mul_1 => mul_1
# sqrt => sqrt
# sub_1 => sub_2
# u => mean
# x_3 => div_2
# Graph fragment:
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_4), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_1, [-1], True), kwargs = {})
# %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %mean), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-12), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_2,), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, %sqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_11, %div_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_12), kwargs = {})
triton_poi_fused_add_div_mean_mul_sqrt_sub_6 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_sub_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_sub_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x2), xmask)
tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp7 = 1e-12
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = tmp5 / tmp9
tmp11 = tmp0 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
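# The epilogue kernel fuses the residual add with the TF-style layernorm of
# BertSelfOutput (dropout does not appear in this traced graph). An
# eager-mode sketch (illustrative):
def _residual_layernorm_reference(h, residual, weight, bias, eps=1e-12):
    x = h + residual
    u = x.mean(-1, keepdim=True)
    s = (x - u).pow(2).mean(-1, keepdim=True)
    return weight * ((x - u) / torch.sqrt(s + eps)) + bias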
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(buf0, primals_3, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(buf1, primals_6, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_6
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool)
# Topologically Sorted Source Nodes: [sub, attention_mask], Original ATen: [aten.rsub, aten.mul]
triton_poi_fused_mul_rsub_1.run(buf5, primals_1, buf6, buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [sub, attention_mask], Original ATen: [aten.rsub, aten.mul]
triton_poi_fused_mul_rsub_2.run(buf9, buf8, primals_1, buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf8
del primals_1
buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(buf2, primals_8, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_8
buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0)
buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13)
del primals_10
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [add_1, u, sub_1, pow_1, s], Original ATen: [aten.add, aten.mean, aten.sub, aten.pow]
triton_poi_fused_add_mean_pow_sub_5.run(buf13, primals_4, buf14, buf15, 16, grid=grid(16), stream=stream0)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_1, u, sub_1, add_2, sqrt, x_3, mul_1, hidden_states_2], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul]
triton_poi_fused_add_div_mean_mul_sqrt_sub_6.run(primals_11, buf13, primals_4, buf14, buf15, primals_12, buf16, 64, grid=grid(64), stream=stream0)
del buf14
del buf15
del primals_12
return (buf16, primals_4, primals_11, buf9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, primals_9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, query_states, key_states, value_states, attention_mask):
"""
Args:
query_states: (N, Lq, D)
key_states: (N, L, D)
value_states: (N, L, D)
attention_mask: (N, Lq, L)
        Returns:
            context_layer: (N, Lq, D)
        """
attention_mask = (1 - attention_mask.unsqueeze(1)) * -10000.0
mixed_query_layer = self.query(query_states)
mixed_key_layer = self.key(key_states)
mixed_value_layer = self.value(value_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
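# The (1 - mask) * -10000.0 construction above is the standard additive
# attention mask: positions with mask == 0 receive a large negative score, so
# softmax drives their probability toward zero. A minimal sketch
# (illustrative):
def _additive_mask_reference(scores, attention_mask):
    # scores: (N, H, Lq, L); attention_mask: (N, Lq, L) with 1 = attend, 0 = ignore
    return scores + (1 - attention_mask.unsqueeze(1)) * -10000.0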
class BertLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-12):
        """
        Construct a layernorm module in the TF style (epsilon inside the square root).
        """
        super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.dropout)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, input_tensor, input_tensor,
attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
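# Usage sketch mirroring get_inputs()/get_init_inputs() below (an all-ones
# mask attends everywhere; .eval() disables the two dropouts):
def _example_bert_attention():
    config = _mock_config(hidden_size=4, num_attention_heads=4,
        attention_probs_dropout_prob=0.5, dropout=0.5)
    attn = BertAttention(config).eval()
    x = torch.rand(4, 4, 4)
    return attn(x, torch.ones(4, 4, 4))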
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=
4, attention_probs_dropout_prob=0.5, dropout=0.5)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
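# triton_poi_fused_mul_rsub_1: per softmax row, applies the additive mask
# (1 - mask) * -10000.0 to the four scores, then computes the row max, the sum
# of exp(score - max), and a flag that is set when at least one logit in the
# row is finite (used below to zero fully masked rows).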
@triton.jit
def triton_poi_fused_mul_rsub_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 * x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy
='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (2 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr1 + (3 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = -10000.0
tmp5 = tmp3 * tmp4
tmp6 = tmp0 + tmp5
tmp9 = tmp2 - tmp8
tmp10 = tmp9 * tmp4
tmp11 = tmp7 + tmp10
tmp12 = triton_helpers.maximum(tmp6, tmp11)
tmp15 = tmp2 - tmp14
tmp16 = tmp15 * tmp4
tmp17 = tmp13 + tmp16
tmp18 = triton_helpers.maximum(tmp12, tmp17)
tmp21 = tmp2 - tmp20
tmp22 = tmp21 * tmp4
tmp23 = tmp19 + tmp22
tmp24 = triton_helpers.maximum(tmp18, tmp23)
tmp25 = tmp6 - tmp24
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp11 - tmp24
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp26 + tmp28
tmp30 = tmp17 - tmp24
tmp31 = tl_math.exp(tmp30)
tmp32 = tmp29 + tmp31
tmp33 = tmp23 - tmp24
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp32 + tmp34
tmp36 = float('-inf')
tmp37 = tmp6 == tmp36
tmp38 = tmp37 == 0
tmp39 = tmp38.to(tl.int64)
tmp40 = tmp39 != 0
tmp41 = tmp11 == tmp36
tmp42 = tmp41 == 0
tmp43 = tmp42.to(tl.int64)
tmp44 = tmp43 != 0
tmp45 = tmp40 | tmp44
tmp46 = tmp17 == tmp36
tmp47 = tmp46 == 0
tmp48 = tmp47.to(tl.int64)
tmp49 = tmp48 != 0
tmp50 = tmp45 | tmp49
tmp51 = tmp23 == tmp36
tmp52 = tmp51 == 0
tmp53 = tmp52.to(tl.int64)
tmp54 = tmp53 != 0
tmp55 = tmp50 | tmp54
tl.store(out_ptr0 + x3, tmp24, xmask)
tl.store(out_ptr1 + x3, tmp35, xmask)
tl.store(out_ptr2 + x3, tmp55, xmask)
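# triton_poi_fused_mul_rsub_2: finishes the softmax in place, writing
# exp(score - max) / sum using the statistics from the previous kernel, and
# zeroes any row whose flag says every logit was -inf.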
@triton.jit
def triton_poi_fused_mul_rsub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex // 4
x5 = xindex
x3 = xindex // 64
x6 = xindex % 16
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp2 = tl.load(in_out_ptr0 + x5, xmask)
tmp3 = tl.load(in_ptr1 + (x6 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp1 = tmp0 == 0
tmp4 = 1.0
tmp5 = tmp4 - tmp3
tmp6 = -10000.0
tmp7 = tmp5 * tmp6
tmp8 = tmp2 + tmp7
tmp10 = tmp8 - tmp9
tmp11 = tl_math.exp(tmp10)
tmp13 = tmp11 / tmp12
tmp14 = 0.0
tmp15 = tl.where(tmp1, tmp14, tmp13)
tl.store(in_out_ptr0 + x5, tmp15, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
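# The next two kernels implement BertLayerNorm over hidden + residual:
# triton_poi_fused_add_mean_pow_sub_5 computes the per-row mean and biased
# variance of the last dimension, and the kernel after it applies
# weight * (x - mean) / sqrt(var + 1e-12) + bias.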
@triton.jit
def triton_poi_fused_add_mean_pow_sub_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x2, xmask)
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp7 = 1e-12
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = tmp5 / tmp9
tmp11 = tmp0 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 4)](buf1, primals_6, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_6
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf1
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool)
triton_poi_fused_mul_rsub_1[grid(64)](buf5, primals_1, buf6, buf7,
buf8, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_mul_rsub_2[grid(256)](buf9, buf8, primals_1, buf6,
buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf8
del primals_1
buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf7
triton_poi_fused_3[grid(16, 4)](buf2, primals_8, buf10, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_8
buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf6
triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_10
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_mean_pow_sub_5[grid(16)](buf13, primals_4,
buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_sqrt_sub_6[grid(64)](primals_11,
buf13, primals_4, buf14, buf15, primals_12, buf16, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del buf14
del buf15
del primals_12
return buf16, primals_4, primals_11, buf9, reinterpret_tensor(buf10, (
16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4,
1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, primals_9
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, query_states, key_states, value_states, attention_mask):
"""
Args:
query_states: (N, Lq, D)
key_states: (N, L, D)
value_states: (N, L, D)
attention_mask: (N, Lq, L)
        Returns:
            context_layer: (N, Lq, D)
        """
attention_mask = (1 - attention_mask.unsqueeze(1)) * -10000.0
mixed_query_layer = self.query(query_states)
mixed_key_layer = self.key(key_states)
mixed_value_layer = self.value(value_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class BertLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-12):
        """
        Construct a layernorm module in the TF style (epsilon inside the square root).
        """
        super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.dropout)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttentionNew(nn.Module):
def __init__(self, config):
super(BertAttentionNew, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_0, input_1):
primals_2 = self.self.query.weight
primals_3 = self.self.query.bias
primals_5 = self.self.key.weight
primals_6 = self.self.key.bias
primals_7 = self.self.value.weight
primals_8 = self.self.value.bias
primals_9 = self.output.dense.weight
primals_10 = self.output.dense.bias
primals_11 = self.output.LayerNorm.weight
primals_12 = self.output.LayerNorm.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
| minjoong507/Image-Captioning-Transformer | BertAttention | false | 7,252 | [
"MIT"
] | 1 | 813060f0bb656e336154173f11e99a80362c8c2a | https://github.com/minjoong507/Image-Captioning-Transformer/tree/813060f0bb656e336154173f11e99a80362c8c2a | from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
class BertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, query_states, key_states, value_states, attention_mask):
"""
Args:
query_states: (N, Lq, D)
key_states: (N, L, D)
value_states: (N, L, D)
attention_mask: (N, Lq, L)
        Returns:
            context_layer: (N, Lq, D)
        """
attention_mask = (1 - attention_mask.unsqueeze(1)) * -10000.0
mixed_query_layer = self.query(query_states)
mixed_key_layer = self.key(key_states)
mixed_value_layer = self.value(value_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class BertLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-12):
        """
        Construct a layernorm module in the TF style (epsilon inside the square root).
        """
        super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.dropout)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class Model(nn.Module):
def __init__(self, config):
super().__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
# ... truncated (>4000 chars) for memory efficiency |
BoundSoftmaxImpl | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ux/cuxdg3imhwde5un3in6ey2455bihwhfooq3dlrzba6pk5pyugy5w.py
# Topologically Sorted Source Nodes: [sub, x], Original ATen: [aten.sub, aten.exp]
# Source node to ATen node mapping:
# sub => sub
# x => exp
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %unsqueeze), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused_exp_sub_0 = async_compile.triton('triton_poi_fused_exp_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/rk/crk57ot7q5ezhhll7rh73icrnodouu7urii76vqnf3xfxrtrazbi.py
# Topologically Sorted Source Nodes: [s, truediv], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# s => sum_1
# truediv => div
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [4], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused_div_sum_1 = async_compile.triton('triton_poi_fused_div_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, x], Original ATen: [aten.sub, aten.exp]
stream0 = get_raw_stream(0)
triton_poi_fused_exp_sub_0.run(arg0_1, buf0, 1024, grid=grid(1024), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [s, truediv], Original ATen: [aten.sum, aten.div]
triton_poi_fused_div_sum_1.run(buf0, buf1, 1024, grid=grid(1024), stream=stream0)
del buf0
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class BoundSoftmaxImpl(nn.Module):
def __init__(self, axis):
super().__init__()
self.axis = axis
def forward(self, x):
max_x = torch.max(x, dim=self.axis).values
assert self.axis == int(self.axis)
x = torch.exp(x - max_x.unsqueeze(self.axis))
s = torch.sum(x, dim=self.axis, keepdim=True)
return x / s
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'axis': 4}]
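# Hedged sanity sketch (illustrative; `_check_bound_softmax` is not in the
# source): the max-subtraction above is the standard numerically stable
# softmax, so the module should match torch.softmax along the same axis.
def _check_bound_softmax():
    m = BoundSoftmaxImpl(axis=4)
    x = torch.rand(4, 4, 4, 4, 4)
    assert torch.allclose(m(x), torch.softmax(x, dim=4), atol=1e-6)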
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
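# triton_poi_fused_exp_sub_0 computes exp(x - rowmax) along the last (size-4)
# axis for numerical stability; triton_poi_fused_div_sum_1 then divides by the
# row sum, so the two kernels together reproduce the module's stable softmax.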
@triton.jit
def triton_poi_fused_exp_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_div_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_exp_sub_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_div_sum_1[grid(1024)](buf0, buf1, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
del buf0
return buf1,
class BoundSoftmaxImplNew(nn.Module):
def __init__(self, axis):
super().__init__()
self.axis = axis
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| mnmueller/auto_LiRPA | BoundSoftmaxImpl | false | 7,253 | [
"BSD-3-Clause"
] | 1 | 55cb270b0b99f07b74541d55706c69fbb9daff66 | https://github.com/mnmueller/auto_LiRPA/tree/55cb270b0b99f07b74541d55706c69fbb9daff66 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, axis):
super().__init__()
self.axis = axis
def forward(self, x):
max_x = torch.max(x, dim=self.axis).values
assert self.axis == int(self.axis)
x = torch.exp(x - max_x.unsqueeze(self.axis))
s = torch.sum(x, dim=self.axis, keepdim=True)
return x / s
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
CAT_TemporalEmbedding | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/aj/cajk2ecgc2ept7xftxkdh54a3ls4xyipv5ylqolq2texzer7dylf.py
# Topologically Sorted Source Nodes: [embedding, embedding_1, add, embedding_2, add_1, embedding_3, add_2, temporal_embed], Original ATen: [aten.embedding, aten.add]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# embedding => embedding
# embedding_1 => embedding_1
# embedding_2 => embedding_2
# embedding_3 => embedding_3
# temporal_embed => add_3
# Graph fragment:
# %embedding : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%arg1_1, %select), kwargs = {})
# %embedding_1 : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%arg2_1, %select_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%embedding, %embedding_1), kwargs = {})
# %embedding_2 : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%arg3_1, %select_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %embedding_2), kwargs = {})
# %embedding_3 : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%arg4_1, %select_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %embedding_3), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, 0.0), kwargs = {})
triton_poi_fused_add_embedding_0 = async_compile.triton('triton_poi_fused_add_embedding_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_embedding_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_embedding_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 160
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 10)
x0 = xindex % 10
x2 = xindex
tmp0 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp1 = tmp0.to(tl.int64)
tmp2 = tl.full([XBLOCK], 24, tl.int32)
tmp3 = tmp1 + tmp2
tmp4 = tmp1 < 0
tmp5 = tl.where(tmp4, tmp3, tmp1)
tl.device_assert(((0 <= tmp5) & (tmp5 < 24)) | ~(xmask), "index out of bounds: 0 <= tmp5 < 24")
tmp7 = tl.load(in_ptr1 + (x0 + (10*tmp5)), xmask)
tmp9 = tmp8.to(tl.int64)
tmp10 = tl.full([XBLOCK], 7, tl.int32)
tmp11 = tmp9 + tmp10
tmp12 = tmp9 < 0
tmp13 = tl.where(tmp12, tmp11, tmp9)
tl.device_assert(((0 <= tmp13) & (tmp13 < 7)) | ~(xmask), "index out of bounds: 0 <= tmp13 < 7")
tmp15 = tl.load(in_ptr2 + (x0 + (10*tmp13)), xmask)
tmp16 = tmp7 + tmp15
tmp18 = tmp17.to(tl.int64)
tmp19 = tl.full([XBLOCK], 32, tl.int32)
tmp20 = tmp18 + tmp19
tmp21 = tmp18 < 0
tmp22 = tl.where(tmp21, tmp20, tmp18)
tl.device_assert(((0 <= tmp22) & (tmp22 < 32)) | ~(xmask), "index out of bounds: 0 <= tmp22 < 32")
tmp24 = tl.load(in_ptr3 + (x0 + (10*tmp22)), xmask)
tmp25 = tmp16 + tmp24
tmp27 = tmp26.to(tl.int64)
tmp28 = tl.full([XBLOCK], 13, tl.int32)
tmp29 = tmp27 + tmp28
tmp30 = tmp27 < 0
tmp31 = tl.where(tmp30, tmp29, tmp27)
tl.device_assert(((0 <= tmp31) & (tmp31 < 13)) | ~(xmask), "index out of bounds: 0 <= tmp31 < 13")
tmp33 = tl.load(in_ptr4 + (x0 + (10*tmp31)), xmask)
tmp34 = tmp25 + tmp33
tmp35 = 0.0
tmp36 = tmp34 + tmp35
tl.store(out_ptr0 + (x2), tmp36, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (24, 10), (10, 1))
assert_size_stride(arg2_1, (7, 10), (10, 1))
assert_size_stride(arg3_1, (32, 10), (10, 1))
assert_size_stride(arg4_1, (13, 10), (10, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 10), (40, 10, 1), torch.float32)
# Topologically Sorted Source Nodes: [embedding, embedding_1, add, embedding_2, add_1, embedding_3, add_2, temporal_embed], Original ATen: [aten.embedding, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_embedding_0.run(arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, buf0, 160, grid=grid(160), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
del arg4_1
return (reinterpret_tensor(buf0, (10, 4, 4), (1, 40, 10), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((24, 10), (10, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((7, 10), (10, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((32, 10), (10, 1), device='cuda:0', dtype=torch.float32)
arg4_1 = rand_strided((13, 10), (10, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
class CAT_FixedEmbedding(nn.Module):
def __init__(self, c_in, d_model):
super(CAT_FixedEmbedding, self).__init__()
w = torch.zeros(c_in, d_model).float()
        w.requires_grad = False
position = torch.arange(0, c_in).float().unsqueeze(1)
div_term = (torch.arange(0, d_model, 2).float() * -(math.log(
10000.0) / d_model)).exp()
w[:, 0::2] = torch.sin(position * div_term)
w[:, 1::2] = torch.cos(position * div_term)
self.emb = nn.Embedding(c_in, d_model)
self.emb.weight = nn.Parameter(w, requires_grad=False)
def forward(self, x):
return self.emb(x).detach()
class CAT_TemporalEmbedding(nn.Module):
def __init__(self, d_feature=10, embed_type='fixed', freq='h'):
super(CAT_TemporalEmbedding, self).__init__()
minute_size = 4
hour_size = 24
weekday_size = 7
day_size = 32
month_size = 13
Embed = CAT_FixedEmbedding if embed_type == 'fixed' else nn.Embedding
if freq == 't':
self.minute_embed = Embed(minute_size, d_feature)
self.hour_embed = Embed(hour_size, d_feature)
self.weekday_embed = Embed(weekday_size, d_feature)
self.day_embed = Embed(day_size, d_feature)
self.month_embed = Embed(month_size, d_feature)
def forward(self, x):
x = x.long()
minute_x = self.minute_embed(x[:, :, 4]) if hasattr(self,
'minute_embed') else 0.0
hour_x = self.hour_embed(x[:, :, 3])
weekday_x = self.weekday_embed(x[:, :, 2])
day_x = self.day_embed(x[:, :, 1])
month_x = self.month_embed(x[:, :, 0])
temporal_embed = hour_x + weekday_x + day_x + month_x + minute_x
temporal_embed = temporal_embed.permute(2, 0, 1)
return temporal_embed
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
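# Hedged sanity sketch (illustrative; `_check_fixed_embedding` is not part of
# the source): CAT_FixedEmbedding is a frozen sinusoidal lookup table, so a
# forward pass must equal direct row indexing into emb.weight.
def _check_fixed_embedding():
    table = CAT_FixedEmbedding(24, 10)
    idx = torch.tensor([0, 5, 23])
    assert torch.equal(table(idx), table.emb.weight[idx])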
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
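# triton_poi_fused_add_embedding_0 fuses the four table lookups (hour, weekday,
# day, month) and their sum into a single pass; the device asserts bound each
# index by its table size (24, 7, 32, 13), and the trailing + 0.0 stands in
# for the absent minute embedding at freq='h'.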
@triton.jit
def triton_poi_fused_add_embedding_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 160
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 10
x0 = xindex % 10
x2 = xindex
tmp0 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp26 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tmp0.to(tl.int64)
tmp2 = tl.full([XBLOCK], 24, tl.int32)
tmp3 = tmp1 + tmp2
tmp4 = tmp1 < 0
tmp5 = tl.where(tmp4, tmp3, tmp1)
tl.device_assert((0 <= tmp5) & (tmp5 < 24) | ~xmask,
'index out of bounds: 0 <= tmp5 < 24')
tmp7 = tl.load(in_ptr1 + (x0 + 10 * tmp5), xmask)
tmp9 = tmp8.to(tl.int64)
tmp10 = tl.full([XBLOCK], 7, tl.int32)
tmp11 = tmp9 + tmp10
tmp12 = tmp9 < 0
tmp13 = tl.where(tmp12, tmp11, tmp9)
tl.device_assert((0 <= tmp13) & (tmp13 < 7) | ~xmask,
'index out of bounds: 0 <= tmp13 < 7')
tmp15 = tl.load(in_ptr2 + (x0 + 10 * tmp13), xmask)
tmp16 = tmp7 + tmp15
tmp18 = tmp17.to(tl.int64)
tmp19 = tl.full([XBLOCK], 32, tl.int32)
tmp20 = tmp18 + tmp19
tmp21 = tmp18 < 0
tmp22 = tl.where(tmp21, tmp20, tmp18)
tl.device_assert((0 <= tmp22) & (tmp22 < 32) | ~xmask,
'index out of bounds: 0 <= tmp22 < 32')
tmp24 = tl.load(in_ptr3 + (x0 + 10 * tmp22), xmask)
tmp25 = tmp16 + tmp24
tmp27 = tmp26.to(tl.int64)
tmp28 = tl.full([XBLOCK], 13, tl.int32)
tmp29 = tmp27 + tmp28
tmp30 = tmp27 < 0
tmp31 = tl.where(tmp30, tmp29, tmp27)
tl.device_assert((0 <= tmp31) & (tmp31 < 13) | ~xmask,
'index out of bounds: 0 <= tmp31 < 13')
tmp33 = tl.load(in_ptr4 + (x0 + 10 * tmp31), xmask)
tmp34 = tmp25 + tmp33
tmp35 = 0.0
tmp36 = tmp34 + tmp35
tl.store(out_ptr0 + x2, tmp36, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (24, 10), (10, 1))
assert_size_stride(arg2_1, (7, 10), (10, 1))
assert_size_stride(arg3_1, (32, 10), (10, 1))
assert_size_stride(arg4_1, (13, 10), (10, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 10), (40, 10, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_embedding_0[grid(160)](arg0_1, arg1_1, arg2_1,
arg3_1, arg4_1, buf0, 160, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
del arg4_1
return reinterpret_tensor(buf0, (10, 4, 4), (1, 40, 10), 0),
class CAT_FixedEmbedding(nn.Module):
def __init__(self, c_in, d_model):
super(CAT_FixedEmbedding, self).__init__()
w = torch.zeros(c_in, d_model).float()
        w.requires_grad = False
position = torch.arange(0, c_in).float().unsqueeze(1)
div_term = (torch.arange(0, d_model, 2).float() * -(math.log(
10000.0) / d_model)).exp()
w[:, 0::2] = torch.sin(position * div_term)
w[:, 1::2] = torch.cos(position * div_term)
self.emb = nn.Embedding(c_in, d_model)
self.emb.weight = nn.Parameter(w, requires_grad=False)
def forward(self, x):
return self.emb(x).detach()
class CAT_TemporalEmbeddingNew(nn.Module):
def __init__(self, d_feature=10, embed_type='fixed', freq='h'):
super(CAT_TemporalEmbeddingNew, self).__init__()
minute_size = 4
hour_size = 24
weekday_size = 7
day_size = 32
month_size = 13
Embed = CAT_FixedEmbedding if embed_type == 'fixed' else nn.Embedding
if freq == 't':
self.minute_embed = Embed(minute_size, d_feature)
self.hour_embed = Embed(hour_size, d_feature)
self.weekday_embed = Embed(weekday_size, d_feature)
self.day_embed = Embed(day_size, d_feature)
self.month_embed = Embed(month_size, d_feature)
def forward(self, input_0):
arg1_1 = self.hour_embed.emb.weight
arg2_1 = self.weekday_embed.emb.weight
arg3_1 = self.day_embed.emb.weight
arg4_1 = self.month_embed.emb.weight
arg0_1 = input_0
output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1])
return output[0]
| mkmysk123456789/Informer2020 | CAT_TemporalEmbedding | false | 7,254 | [
"Apache-2.0"
] | 1 | ad4b895169a17db580aab6d2c09fd07e06c9b6fa | https://github.com/mkmysk123456789/Informer2020/tree/ad4b895169a17db580aab6d2c09fd07e06c9b6fa | import math
import torch
import torch.nn as nn
class CAT_FixedEmbedding(nn.Module):
def __init__(self, c_in, d_model):
super().__init__()
w = torch.zeros(c_in, d_model).float()
w.require_grad = False
position = torch.arange(0, c_in).float().unsqueeze(1)
div_term = (torch.arange(0, d_model, 2).float() * -(math.log(
10000.0) / d_model)).exp()
w[:, 0::2] = torch.sin(position * div_term)
w[:, 1::2] = torch.cos(position * div_term)
self.emb = nn.Embedding(c_in, d_model)
self.emb.weight = nn.Parameter(w, requires_grad=False)
def forward(self, x):
return self.emb(x).detach()
class Model(nn.Module):
def __init__(self, d_feature=10, embed_type='fixed', freq='h'):
super().__init__()
minute_size = 4
hour_size = 24
weekday_size = 7
day_size = 32
month_size = 13
Embed = CAT_FixedEmbedding if embed_type == 'fixed' else nn.Embedding
if freq == 't':
self.minute_embed = Embed(minute_size, d_feature)
self.hour_embed = Embed(hour_size, d_feature)
self.weekday_embed = Embed(weekday_size, d_feature)
self.day_embed = Embed(day_size, d_feature)
self.month_embed = Embed(month_size, d_feature)
def forward(self, x):
x = x.long()
minute_x = self.minute_embed(x[:, :, 4]) if hasattr(self,
'minute_embed') else 0.0
hour_x = self.hour_embed(x[:, :, 3])
weekday_x = self.weekday_embed(x[:, :, 2])
day_x = self.day_embed(x[:, :, 1])
month_x = self.month_embed(x[:, :, 0])
temporal_embed = hour_x + weekday_x + day_x + month_x + minute_x
temporal_embed = temporal_embed.permute(2, 0, 1)
return temporal_embed
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return []
|
CQAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/gv/cgvyzvgb4s6skjl2lcdf54y4sqcmmvdkvv2hcpobs5hraiugivrp.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone, aten._unsafe_view]
# Source node to ATen node mapping:
# matmul => clone_2, view
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%clone_2, [16, 4]), kwargs = {})
triton_poi_fused__unsafe_view_clone_0 = async_compile.triton('triton_poi_fused__unsafe_view_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_view_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + ((4*x1) + (16*(y0 // 4)) + (y0 % 4)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/pa/cpa2xlx5dvbxg7yen7xxu7aclanjppmhz3pxswu4bw5tkkqjh7rr.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute, %primals_5), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/uf/cufxi6e4u5mvvfd5sut4qxvbrhzb7usdtxtg3ze2t6ajs3np5275.py
# Topologically Sorted Source Nodes: [add, res, res_1, mul_1, sub, mul_2, add_2, mul_3, sub_1, mul_4, add_3], Original ATen: [aten.add, aten.mul, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_2 => add_3
# add_3 => add_4
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# res => add_1
# res_1 => add_2
# sub => sub
# sub_1 => sub_2
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, %expand_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %bmm), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %primals_6), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, %primals_8), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %primals_8), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, -1e+30), kwargs = {})
# %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, %primals_7), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %primals_7), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, -1e+30), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %mul_4), kwargs = {})
triton_poi_fused_add_mul_rsub_2 = async_compile.triton('triton_poi_fused_add_mul_rsub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 4)
x0 = xindex % 4
x2 = (xindex // 16)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4), xmask)
tmp5 = tl.load(in_ptr3 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp8 = tl.load(in_ptr4 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr5 + (x3), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp4 + tmp6
tmp9 = tmp7 * tmp8
tmp10 = 1.0
tmp11 = tmp10 - tmp8
tmp12 = -1e+30
tmp13 = tmp11 * tmp12
tmp14 = tmp9 + tmp13
tmp16 = tmp7 * tmp15
tmp17 = tmp10 - tmp15
tmp18 = tmp17 * tmp12
tmp19 = tmp16 + tmp18
tl.store(out_ptr0 + (x4), tmp14, xmask)
tl.store(out_ptr1 + (x4), tmp19, xmask)
''', device_str='cuda')
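# Note (added): the mul/rsub pattern above is the usual mask_logits trick,
# logits * mask + (1 - mask) * (-1e30), which drives masked positions to an
# effectively -inf score before the softmax kernels that follow.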
# kernel path: runs/run_shard_4/inductor_cache/7s/c7spagnqvsgjrukyw5jujzjmswxuigeuvpyhxgdob766q2gfvgzr.py
# Topologically Sorted Source Nodes: [S1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# S1 => amax, exp, sub_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_3, [2], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dw/cdwqsjnh2osfmjr2utzzaqdg2vrfivzkuhareq3urgidllj2bsvr.py
# Topologically Sorted Source Nodes: [S1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# S1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/5q/c5q2cwpxtjxxz7h6xna43qv2cdyea56heflv2ye7d7mtmtdm7twa.py
# Topologically Sorted Source Nodes: [S2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# S2 => amax_1, exp_1, sub_3
# Graph fragment:
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_4, [1], True), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %amax_1), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/cg/ccg5e776j77ye72qtmo5nfcxjaz6zv34474xpm34f62r6hfxzo6g.py
# Topologically Sorted Source Nodes: [S2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# S2 => div_1, sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {})
triton_poi_fused__softmax_6 = async_compile.triton('triton_poi_fused__softmax_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/pj/cpjglqinm2mgqqclt3c66vcfcnwohgtuw2thbq5rdw75hhx4fn5r.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%permute, %bmm_1, %mul_5, %mul_6], 2), kwargs = {})
triton_poi_fused_cat_7 = async_compile.triton('triton_poi_fused_cat_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16) % 4
x2 = (xindex // 64)
x3 = (xindex // 16)
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1 + (4*x0) + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + ((4*x3) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (x1 + (4*((-8) + x0)) + (16*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr1 + ((4*x3) + ((-8) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp14, tmp17, tmp18)
tmp20 = tmp0 >= tmp12
tmp21 = tl.full([1], 16, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tl.load(in_ptr0 + (x1 + (4*((-12) + x0)) + (16*x2)), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp24 = tl.load(in_ptr2 + ((4*x3) + ((-12) + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp20, tmp25, tmp26)
tmp28 = tl.where(tmp14, tmp19, tmp27)
tmp29 = tl.where(tmp9, tmp10, tmp28)
tmp30 = tl.where(tmp4, tmp5, tmp29)
tl.store(out_ptr0 + (x4), tmp30, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 1), (1, 1))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_6, (1, ), (1, ))
assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_8, (4, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone, aten._unsafe_view]
stream0 = get_raw_stream(0)
triton_poi_fused__unsafe_view_clone_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0)
buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(buf0, primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone, aten._unsafe_view]
triton_poi_fused__unsafe_view_clone_0.run(primals_2, buf2, 16, 4, grid=grid(16, 4), stream=stream0)
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mm]
extern_kernels.mm(buf2, primals_4, out=buf3)
del primals_4
buf4 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(primals_1, primals_5, buf4, 64, grid=grid(64), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, subres2], Original ATen: [aten.mul, aten.bmm]
extern_kernels.bmm(buf4, primals_2, out=buf5)
buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0); del buf4 # reuse
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, res, res_1, mul_1, sub, mul_2, add_2, mul_3, sub_1, mul_4, add_3], Original ATen: [aten.add, aten.mul, aten.rsub]
triton_poi_fused_add_mul_rsub_2.run(buf1, buf3, buf5, primals_6, primals_8, primals_7, buf6, buf9, 64, grid=grid(64), stream=stream0)
del buf1
del buf3
del primals_6
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [S1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf6, buf7, 64, grid=grid(64), stream=stream0)
buf8 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [S1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf7, buf8, 64, grid=grid(64), stream=stream0)
buf10 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [S2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf9, buf10, 64, grid=grid(64), stream=stream0)
buf11 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [S2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_6.run(buf10, buf11, 64, grid=grid(64), stream=stream0)
buf12 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [A], Original ATen: [aten.bmm]
extern_kernels.bmm(buf8, reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), out=buf12)
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf8, reinterpret_tensor(buf11, (4, 4, 4), (16, 1, 4), 0), out=buf13)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [B], Original ATen: [aten.bmm]
extern_kernels.bmm(buf13, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), out=buf14)
del buf13
buf15 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
triton_poi_fused_cat_7.run(primals_1, buf12, buf14, buf15, 256, grid=grid(256), stream=stream0)
del buf12
del buf14
return (reinterpret_tensor(buf15, (4, 16, 4), (64, 1, 16), 0), primals_7, primals_8, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), primals_2, buf8, buf11, reinterpret_tensor(buf2, (4, 16), (1, 4), 0), reinterpret_tensor(buf0, (4, 16), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
def mask_logits(target, mask):
mask = mask.type(torch.float32)
return target * mask + (1 - mask) * -1e+30
class CQAttention(nn.Module):
def __init__(self, d_model, dropout=0.1):
super().__init__()
w4C = torch.empty(d_model, 1)
w4Q = torch.empty(d_model, 1)
w4mlu = torch.empty(1, 1, d_model)
nn.init.xavier_uniform_(w4C)
nn.init.xavier_uniform_(w4Q)
nn.init.xavier_uniform_(w4mlu)
self.w4C = nn.Parameter(w4C)
self.w4Q = nn.Parameter(w4Q)
self.w4mlu = nn.Parameter(w4mlu)
bias = torch.empty(1)
nn.init.constant_(bias, 0)
self.bias = nn.Parameter(bias)
self.dropout = dropout
def forward(self, C, Q, Cmask, Qmask):
C = C.transpose(1, 2)
Q = Q.transpose(1, 2)
batch_size_c = C.size()[0]
_batch_size, Lc, _d_model = C.shape
_batch_size, Lq, _d_model = Q.shape
S = self.trilinear_for_attention(C, Q)
Cmask = Cmask.view(batch_size_c, Lc, 1)
Qmask = Qmask.view(batch_size_c, 1, Lq)
S1 = F.softmax(mask_logits(S, Qmask), dim=2)
S2 = F.softmax(mask_logits(S, Cmask), dim=1)
A = torch.bmm(S1, Q)
B = torch.bmm(torch.bmm(S1, S2.transpose(1, 2)), C)
out = torch.cat([C, A, torch.mul(C, A), torch.mul(C, B)], dim=2)
return out.transpose(1, 2)
def trilinear_for_attention(self, C, Q):
_batch_size, Lc, _d_model = C.shape
_batch_size, Lq, _d_model = Q.shape
dropout = self.dropout
C = F.dropout(C, p=dropout, training=self.training)
Q = F.dropout(Q, p=dropout, training=self.training)
subres0 = torch.matmul(C, self.w4C).expand([-1, -1, Lq])
        subres1 = torch.matmul(Q, self.w4Q).transpose(1, 2).expand([-1, Lc, -1])
subres2 = torch.matmul(C * self.w4mlu, Q.transpose(1, 2))
res = subres0 + subres1 + subres2
res += self.bias
return res
def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]),
        torch.rand([4, 4, 1]), torch.rand([4, 1, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
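# Illustrative usage sketch (added for clarity; not part of the original
# module). It exercises CQAttention with the toy shapes from get_inputs():
# context/query arrive channel-first as (batch, d_model, L) and the fused
# output comes back as (batch, 4 * d_model, Lc).
def example_cqattention_usage():
    attn = CQAttention(d_model=4).eval()  # eval() disables dropout
    C, Q, Cmask, Qmask = get_inputs()
    out = attn(C, Q, Cmask, Qmask)
    assert out.shape == (4, 16, 4)  # (batch, 4 * d_model, Lc)
    return out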
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
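# The kernel below materializes C.transpose(1, 2) as a contiguous
# (batch * Lc, d_model) matrix so the w4C projection can run as a plain mm.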
@triton.jit
def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (4 * x1 + 16 * (y0 // 4) + y0 % 4), xmask &
ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
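# Fuses the trilinear similarity (subres0 + subres1 + subres2 + bias) with
# both mask_logits applications in one pass: out_ptr0 receives S masked by
# the query mask (input to S1), out_ptr1 receives S masked by the context
# mask (input to S2); the -1e+30 fill drives masked logits to ~0 probability
# after softmax.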
@triton.jit
def triton_poi_fused_add_mul_rsub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 4
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4, xmask)
tmp5 = tl.load(in_ptr3 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
    tmp8 = tl.load(in_ptr4 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr5 + x3, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp7 = tmp4 + tmp6
tmp9 = tmp7 * tmp8
tmp10 = 1.0
tmp11 = tmp10 - tmp8
tmp12 = -1e+30
tmp13 = tmp11 * tmp12
tmp14 = tmp9 + tmp13
tmp16 = tmp7 * tmp15
tmp17 = tmp10 - tmp15
tmp18 = tmp17 * tmp12
tmp19 = tmp16 + tmp18
tl.store(out_ptr0 + x4, tmp14, xmask)
tl.store(out_ptr1 + x4, tmp19, xmask)
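# Two-pass numerically stable softmax over the last dim (S1): this kernel
# subtracts the per-row max before exponentiating; the next one divides by
# the per-row sum.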
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
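# The same two-pass softmax, but reduced over dim 1 (S2): the four loads
# walk a column (stride 4) of each 4x4 attention matrix instead of a row.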
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
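# Materializes torch.cat([C, A, C * A, C * B], dim=2) without intermediate
# tensors: the feature index x0 selects the source (0-3 -> C, 4-7 -> A,
# 8-11 -> C * A, 12-15 -> C * B); C is read straight from its
# (batch, d_model, Lc) layout, with the transpose folded into the indexing.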
@triton.jit
def triton_poi_fused_cat_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16 % 4
x2 = xindex // 64
x3 = xindex // 16
x4 = xindex
tmp0 = x0
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1 + 4 * x0 + 16 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (4 * x3 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (x1 + 4 * (-8 + x0) + 16 * x2), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr1 + (4 * x3 + (-8 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp14, tmp17, tmp18)
tmp20 = tmp0 >= tmp12
tmp23 = tl.load(in_ptr0 + (x1 + 4 * (-12 + x0) + 16 * x2), tmp20 &
xmask, eviction_policy='evict_last', other=0.0)
tmp24 = tl.load(in_ptr2 + (4 * x3 + (-12 + x0)), tmp20 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 * tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp20, tmp25, tmp26)
tmp28 = tl.where(tmp14, tmp19, tmp27)
tmp29 = tl.where(tmp9, tmp10, tmp28)
tmp30 = tl.where(tmp4, tmp5, tmp29)
tl.store(out_ptr0 + x4, tmp30, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 1), (1, 1))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_6, (1,), (1,))
assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_8, (4, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__unsafe_view_clone_0[grid(16, 4)](primals_1, buf0,
16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(buf0, primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
triton_poi_fused__unsafe_view_clone_0[grid(16, 4)](primals_2, buf2,
16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(buf2, primals_4, out=buf3)
del primals_4
buf4 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
triton_poi_fused_mul_1[grid(64)](primals_1, primals_5, buf4, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf4, primals_2, out=buf5)
buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0)
del buf4
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_2[grid(64)](buf1, buf3, buf5,
primals_6, primals_8, primals_7, buf6, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf1
del buf3
del primals_6
buf7 = buf5
del buf5
triton_poi_fused__softmax_3[grid(64)](buf6, buf7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused__softmax_4[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf10 = buf7
del buf7
triton_poi_fused__softmax_5[grid(64)](buf9, buf10, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf11 = buf9
del buf9
triton_poi_fused__softmax_6[grid(64)](buf10, buf11, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf12 = buf10
del buf10
        extern_kernels.bmm(buf8, reinterpret_tensor(primals_2, (4, 4, 4),
            (16, 1, 4), 0), out=buf12)
        buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf8, reinterpret_tensor(buf11, (4, 4, 4),
            (16, 1, 4), 0), out=buf13)
        buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf13, reinterpret_tensor(primals_1, (4, 4, 4),
            (16, 1, 4), 0), out=buf14)
del buf13
buf15 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_cat_7[grid(256)](primals_1, buf12, buf14, buf15,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf12
del buf14
    return (reinterpret_tensor(buf15, (4, 16, 4), (64, 1, 16), 0),
        primals_7, primals_8,
        reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0),
        primals_2, buf8, buf11,
        reinterpret_tensor(buf2, (4, 16), (1, 4), 0),
        reinterpret_tensor(buf0, (4, 16), (1, 4), 0))
def mask_logits(target, mask):
mask = mask.type(torch.float32)
return target * mask + (1 - mask) * -1e+30
class CQAttentionNew(nn.Module):
def __init__(self, d_model, dropout=0.1):
super().__init__()
w4C = torch.empty(d_model, 1)
w4Q = torch.empty(d_model, 1)
w4mlu = torch.empty(1, 1, d_model)
nn.init.xavier_uniform_(w4C)
nn.init.xavier_uniform_(w4Q)
nn.init.xavier_uniform_(w4mlu)
self.w4C = nn.Parameter(w4C)
self.w4Q = nn.Parameter(w4Q)
self.w4mlu = nn.Parameter(w4mlu)
bias = torch.empty(1)
nn.init.constant_(bias, 0)
self.bias = nn.Parameter(bias)
self.dropout = dropout
def trilinear_for_attention(self, C, Q):
_batch_size, Lc, _d_model = C.shape
_batch_size, Lq, _d_model = Q.shape
dropout = self.dropout
C = F.dropout(C, p=dropout, training=self.training)
Q = F.dropout(Q, p=dropout, training=self.training)
subres0 = torch.matmul(C, self.w4C).expand([-1, -1, Lq])
        subres1 = torch.matmul(Q, self.w4Q).transpose(1, 2).expand([-1, Lc, -1])
subres2 = torch.matmul(C * self.w4mlu, Q.transpose(1, 2))
res = subres0 + subres1 + subres2
res += self.bias
return res
def forward(self, input_0, input_1, input_2, input_3):
primals_3 = self.w4C
primals_4 = self.w4Q
primals_5 = self.w4mlu
primals_6 = self.bias
primals_1 = input_0
primals_2 = input_1
primals_7 = input_2
primals_8 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
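# Note (added for clarity): CQAttentionNew is intended as a drop-in
# replacement for CQAttention above -- same parameters and argument order --
# but it routes forward() through the precompiled call() graph, so inputs
# must be CUDA tensors matching the shapes and strides asserted in call().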
| mirbostani/QA-KD-AL | CQAttention | false | 7,255 | ["MIT"] | 1 | 0ec8756ee06ae2a204a5e9110503bc697e9108fb | https://github.com/mirbostani/QA-KD-AL/tree/0ec8756ee06ae2a204a5e9110503bc697e9108fb | import torch
import torch.nn as nn
import torch.nn.functional as F
def mask_logits(target, mask):
mask = mask.type(torch.float32)
return target * mask + (1 - mask) * -1e+30
class Model(nn.Module):
def __init__(self, d_model, dropout=0.1):
super().__init__()
w4C = torch.empty(d_model, 1)
w4Q = torch.empty(d_model, 1)
w4mlu = torch.empty(1, 1, d_model)
nn.init.xavier_uniform_(w4C)
nn.init.xavier_uniform_(w4Q)
nn.init.xavier_uniform_(w4mlu)
self.w4C = nn.Parameter(w4C)
self.w4Q = nn.Parameter(w4Q)
self.w4mlu = nn.Parameter(w4mlu)
bias = torch.empty(1)
nn.init.constant_(bias, 0)
self.bias = nn.Parameter(bias)
self.dropout = dropout
def forward(self, C, Q, Cmask, Qmask):
C = C.transpose(1, 2)
Q = Q.transpose(1, 2)
batch_size_c = C.size()[0]
_batch_size, Lc, _d_model = C.shape
_batch_size, Lq, _d_model = Q.shape
S = self.trilinear_for_attention(C, Q)
Cmask = Cmask.view(batch_size_c, Lc, 1)
Qmask = Qmask.view(batch_size_c, 1, Lq)
S1 = F.softmax(mask_logits(S, Qmask), dim=2)
S2 = F.softmax(mask_logits(S, Cmask), dim=1)
A = torch.bmm(S1, Q)
B = torch.bmm(torch.bmm(S1, S2.transpose(1, 2)), C)
out = torch.cat([C, A, torch.mul(C, A), torch.mul(C, B)], dim=2)
return out.transpose(1, 2)
def trilinear_for_attention(self, C, Q):
_batch_size, Lc, _d_model = C.shape
_batch_size, Lq, _d_model = Q.shape
dropout = self.dropout
C = F.dropout(C, p=dropout, training=self.training)
Q = F.dropout(Q, p=dropout, training=self.training)
subres0 = torch.matmul(C, self.w4C).expand([-1, -1, Lq])
        subres1 = torch.matmul(Q, self.w4Q).transpose(1, 2).expand([-1, Lc, -1])
subres2 = torch.matmul(C * self.w4mlu, Q.transpose(1, 2))
res = subres0 + subres1 + subres2
res += self.bias
return res
def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]),
        torch.rand([4, 4, 1]), torch.rand([4, 1, 4])]
def get_init_inputs():
return [4]
|
Transition | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/6q/c6q46q7lsepa4jw5qgcgbc5kiud5wm57hubk6vfo4gk47vl2tprk.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ke/ckeku6fry6eqbkps6aynkemvnmb54cigdg6vs53zhq5lp6aghkmk.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# out_1 => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%convolution, [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_2 = async_compile.triton('triton_poi_fused_avg_pool2d_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
buf3 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_2.run(buf2, buf3, 64, grid=grid(64), stream=stream0)
return (buf3, primals_2, buf0, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Transition(nn.Module):
def __init__(self, in_planes, out_planes):
super(Transition, self).__init__()
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=True)
def forward(self, x):
out = self.conv(F.relu(x))
out = F.avg_pool2d(out, 2)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_planes': 4, 'out_planes': 4}]
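# Illustrative shape check (added for clarity; not part of the original
# module): the 1x1 convolution preserves spatial size and the 2x2 average
# pool halves it, so a (4, 4, 4, 4) input comes out as (4, out_planes, 2, 2).
def example_transition_usage():
    layer = Transition(in_planes=4, out_planes=4)
    x, = get_inputs()
    out = layer(x)
    assert out.shape == (4, 4, 2, 2)
    return out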
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
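# 2x2, stride-2 average pooling over each 4x4 map: the four loads cover one
# 2x2 window (offsets 0, 1, 4, 5 from the window origin) and the 0.25
# factor is the window mean.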
@triton.jit
def triton_poi_fused_avg_pool2d_2(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
triton_poi_fused_avg_pool2d_2[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
return buf3, primals_2, buf0, buf2
class TransitionNew(nn.Module):
def __init__(self, in_planes, out_planes):
super(TransitionNew, self).__init__()
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=True)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| mnmueller/auto_LiRPA | Transition | false | 7,256 | ["BSD-3-Clause"] | 1 | 55cb270b0b99f07b74541d55706c69fbb9daff66 | https://github.com/mnmueller/auto_LiRPA/tree/55cb270b0b99f07b74541d55706c69fbb9daff66 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_planes, out_planes):
super().__init__()
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=True)
def forward(self, x):
out = self.conv(F.relu(x))
out = F.avg_pool2d(out, 2)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
mlp_2layer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/el/cel3ti6ei3rprs2l5m6qs62p6md67qhlcbr3oxhxsqfmherljfbo.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (256, 64), (64, 1))
assert_size_stride(primals_3, (256, ), (1, ))
assert_size_stride(primals_4, (10, 256), (256, 1))
assert_size_stride(primals_5, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), reinterpret_tensor(primals_2, (64, 256), (1, 64), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 1024, grid=grid(1024), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (256, 10), (1, 256), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (buf2, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), buf1, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((10, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class mlp_2layer(nn.Module):
def __init__(self, in_ch, in_dim, width=1):
super(mlp_2layer, self).__init__()
self.fc1 = nn.Linear(in_ch * in_dim * in_dim, 256 * width)
self.fc2 = nn.Linear(256 * width, 10)
def forward(self, x):
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_ch': 4, 'in_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (256, 64), (64, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (10, 256), (256, 1))
assert_size_stride(primals_5, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0
), reinterpret_tensor(primals_2, (64, 256), (1, 64), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(1024)](buf1, primals_3, 1024, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(256, 10), (1, 256), 0), alpha=1, beta=1, out=buf2)
del primals_5
return buf2, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0
), buf1, primals_4
class mlp_2layerNew(nn.Module):
def __init__(self, in_ch, in_dim, width=1):
super(mlp_2layerNew, self).__init__()
self.fc1 = nn.Linear(in_ch * in_dim * in_dim, 256 * width)
self.fc2 = nn.Linear(256 * width, 10)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| mnmueller/auto_LiRPA | mlp_2layer | false | 7,257 | ["BSD-3-Clause"] | 1 | 55cb270b0b99f07b74541d55706c69fbb9daff66 | https://github.com/mnmueller/auto_LiRPA/tree/55cb270b0b99f07b74541d55706c69fbb9daff66 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_ch, in_dim, width=1):
super().__init__()
self.fc1 = nn.Linear(in_ch * in_dim * in_dim, 256 * width)
self.fc2 = nn.Linear(256 * width, 10)
def forward(self, x):
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
BertLayerNormNoVar | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ke/ckesoupia4od4yj57n7ovmr2wav7eopyyngdjidks2sqhi3s4yx5.py
# Topologically Sorted Source Nodes: [u, x, mul, add], Original ATen: [aten.mean, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# mul => mul
# u => mean
# x => sub
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sub), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {})
triton_poi_fused_add_mean_mul_sub_0 = async_compile.triton('triton_poi_fused_add_mean_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp14 = tmp12 + tmp13
tl.store(out_ptr0 + (x2), tmp14, xmask)
''', device_str='cuda')
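# Eager-mode reference for the fused mean/sub/mul/add kernel above (illustrative
# only; `_layernorm_novar_reference` is an assumed helper, not generated code).
# The kernel hard-codes a last-dimension size of 4 and computes, per element,
# weight * (x - mean(x, dim=-1)) + bias -- a LayerNorm with the variance term
# dropped.
def _layernorm_novar_reference(x, weight, bias):
    u = x.mean(-1, keepdim=True)  # per-row mean over the last dimension
    return weight * (x - u) + bias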
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [u, x, mul, add], Original ATen: [aten.mean, aten.sub, aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mean_mul_sub_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_3
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class BertLayerNormNoVar(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
super(BertLayerNormNoVar, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
x = x - u
return self.weight * x + self.bias
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mean_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp14 = tmp12 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_mul_sub_0[grid(256)](primals_2, primals_1,
primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class BertLayerNormNoVarNew(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
super(BertLayerNormNoVarNew, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| mnmueller/auto_LiRPA | BertLayerNormNoVar | false | 7,258 | ["BSD-3-Clause"] | 1 | 55cb270b0b99f07b74541d55706c69fbb9daff66 | https://github.com/mnmueller/auto_LiRPA/tree/55cb270b0b99f07b74541d55706c69fbb9daff66 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
x = x - u
return self.weight * x + self.bias
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
mlp_5layer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/el/cel3ti6ei3rprs2l5m6qs62p6md67qhlcbr3oxhxsqfmherljfbo.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %add_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_3, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_3,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/i5/ci5f4nyelvfg4yf2o65ompoikj7ejkd32vb6hqtyrgycc5eswrpx.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_4 => relu_3
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
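# Note (illustrative): triton_poi_fused_relu_0 and triton_poi_fused_relu_1 are
# the same bias-add + ReLU fusion specialized to different hard-coded sizes
# (1024 elements / 256 features vs. 512 / 128); Inductor emits one kernel per
# concrete shape rather than a single shape-generic kernel.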
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (256, 64), (64, 1))
assert_size_stride(primals_3, (256, ), (1, ))
assert_size_stride(primals_4, (256, 256), (256, 1))
assert_size_stride(primals_5, (256, ), (1, ))
assert_size_stride(primals_6, (256, 256), (256, 1))
assert_size_stride(primals_7, (256, ), (1, ))
assert_size_stride(primals_8, (128, 256), (256, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (10, 128), (128, 1))
assert_size_stride(primals_11, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), reinterpret_tensor(primals_2, (64, 256), (1, 64), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 1024, grid=grid(1024), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf3, primals_5, 1024, grid=grid(1024), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (256, 256), (1, 256), 0), out=buf4)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf5, primals_7, 1024, grid=grid(1024), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf5, reinterpret_tensor(primals_8, (256, 128), (1, 256), 0), out=buf6)
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf7, primals_9, 512, grid=grid(512), stream=stream0)
del primals_9
buf8 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf7, reinterpret_tensor(primals_10, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf8)
del primals_11
return (buf8, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), buf1, buf3, buf5, buf7, primals_10, primals_8, primals_6, primals_4, )
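# Eager-mode sketch of what `call` computes end to end (illustrative only;
# `_mlp5_reference` is an assumed helper whose weight/bias arguments mirror the
# primals layout asserted above): five linear layers with a ReLU after each of
# the first four, matching mlp_5layer.forward.
def _mlp5_reference(x, w1, b1, w2, b2, w3, b3, w4, b4, w5, b5):
    import torch.nn.functional as F
    h = F.relu(F.linear(x.view(x.size(0), -1), w1, b1))
    h = F.relu(F.linear(h, w2, b2))
    h = F.relu(F.linear(h, w3, b3))
    h = F.relu(F.linear(h, w4, b4))
    return F.linear(h, w5, b5)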
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((10, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class mlp_5layer(nn.Module):
def __init__(self, in_ch, in_dim, width=1):
super(mlp_5layer, self).__init__()
self.fc1 = nn.Linear(in_ch * in_dim * in_dim, 256 * width)
self.fc2 = nn.Linear(256 * width, 256 * width)
self.fc3 = nn.Linear(256 * width, 256 * width)
self.fc4 = nn.Linear(256 * width, 128 * width)
self.fc5 = nn.Linear(128 * width, 10)
def forward(self, x):
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = F.relu(self.fc4(x))
x = self.fc5(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_ch': 4, 'in_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (256, 64), (64, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (256, 256), (256, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (256, 256), (256, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (128, 256), (256, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (10, 128), (128, 1))
assert_size_stride(primals_11, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0
), reinterpret_tensor(primals_2, (64, 256), (1, 64), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(1024)](buf1, primals_3, 1024, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (256, 256), (
1, 256), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_0[grid(1024)](buf3, primals_5, 1024, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (256, 256), (
1, 256), 0), out=buf4)
buf5 = buf4
del buf4
triton_poi_fused_relu_0[grid(1024)](buf5, primals_7, 1024, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.mm(buf5, reinterpret_tensor(primals_8, (256, 128), (
1, 256), 0), out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_relu_1[grid(512)](buf7, primals_9, 512, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_9
buf8 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_11, buf7, reinterpret_tensor(
primals_10, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf8)
del primals_11
return buf8, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0
), buf1, buf3, buf5, buf7, primals_10, primals_8, primals_6, primals_4
class mlp_5layerNew(nn.Module):
def __init__(self, in_ch, in_dim, width=1):
super(mlp_5layerNew, self).__init__()
self.fc1 = nn.Linear(in_ch * in_dim * in_dim, 256 * width)
self.fc2 = nn.Linear(256 * width, 256 * width)
self.fc3 = nn.Linear(256 * width, 256 * width)
self.fc4 = nn.Linear(256 * width, 128 * width)
self.fc5 = nn.Linear(128 * width, 10)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_10 = self.fc5.weight
primals_11 = self.fc5.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| mnmueller/auto_LiRPA | mlp_5layer | false | 7,259 | ["BSD-3-Clause"] | 1 | 55cb270b0b99f07b74541d55706c69fbb9daff66 | https://github.com/mnmueller/auto_LiRPA/tree/55cb270b0b99f07b74541d55706c69fbb9daff66 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_ch, in_dim, width=1):
super().__init__()
self.fc1 = nn.Linear(in_ch * in_dim * in_dim, 256 * width)
self.fc2 = nn.Linear(256 * width, 256 * width)
self.fc3 = nn.Linear(256 * width, 256 * width)
self.fc4 = nn.Linear(256 * width, 128 * width)
self.fc5 = nn.Linear(128 * width, 10)
def forward(self, x):
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = F.relu(self.fc4(x))
x = self.fc5(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
mlp_3layer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/el/cel3ti6ei3rprs2l5m6qs62p6md67qhlcbr3oxhxsqfmherljfbo.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/i5/ci5f4nyelvfg4yf2o65ompoikj7ejkd32vb6hqtyrgycc5eswrpx.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_2 => relu_1
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (256, 64), (64, 1))
assert_size_stride(primals_3, (256, ), (1, ))
assert_size_stride(primals_4, (128, 256), (256, 1))
assert_size_stride(primals_5, (128, ), (1, ))
assert_size_stride(primals_6, (10, 128), (128, 1))
assert_size_stride(primals_7, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), reinterpret_tensor(primals_2, (64, 256), (1, 64), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 1024, grid=grid(1024), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (256, 128), (1, 256), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf3, primals_5, 512, grid=grid(512), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (buf4, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), buf1, buf3, primals_6, primals_4, )
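# Eager-mode sketch of `call` (illustrative only; `_mlp3_reference` is an
# assumed helper matching the primals layout asserted above): fc1 + ReLU,
# fc2 + ReLU, then fc3, as in mlp_3layer.forward.
def _mlp3_reference(x, w1, b1, w2, b2, w3, b3):
    import torch.nn.functional as F
    h = F.relu(F.linear(x.view(x.size(0), -1), w1, b1))
    h = F.relu(F.linear(h, w2, b2))
    return F.linear(h, w3, b3)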
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((128, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((10, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class mlp_3layer(nn.Module):
def __init__(self, in_ch, in_dim, width=1):
super(mlp_3layer, self).__init__()
self.fc1 = nn.Linear(in_ch * in_dim * in_dim, 256 * width)
self.fc2 = nn.Linear(256 * width, 128 * width)
self.fc3 = nn.Linear(128 * width, 10)
def forward(self, x):
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_ch': 4, 'in_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (256, 64), (64, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (128, 256), (256, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (10, 128), (128, 1))
assert_size_stride(primals_7, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0
), reinterpret_tensor(primals_2, (64, 256), (1, 64), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(1024)](buf1, primals_3, 1024, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (256, 128), (
1, 256), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(512)](buf3, primals_5, 512, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(128, 10), (1, 128), 0), alpha=1, beta=1, out=buf4)
del primals_7
return buf4, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0
), buf1, buf3, primals_6, primals_4
class mlp_3layerNew(nn.Module):
def __init__(self, in_ch, in_dim, width=1):
super(mlp_3layerNew, self).__init__()
self.fc1 = nn.Linear(in_ch * in_dim * in_dim, 256 * width)
self.fc2 = nn.Linear(256 * width, 128 * width)
self.fc3 = nn.Linear(128 * width, 10)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| mnmueller/auto_LiRPA | mlp_3layer | false | 7,261 | ["BSD-3-Clause"] | 1 | 55cb270b0b99f07b74541d55706c69fbb9daff66 | https://github.com/mnmueller/auto_LiRPA/tree/55cb270b0b99f07b74541d55706c69fbb9daff66 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_ch, in_dim, width=1):
super().__init__()
self.fc1 = nn.Linear(in_ch * in_dim * in_dim, 256 * width)
self.fc2 = nn.Linear(256 * width, 128 * width)
self.fc3 = nn.Linear(128 * width, 10)
def forward(self, x):
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
AdaptiveInstanceNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/52/c526p7iwll7vx7gobeuv6q3lym4ek7lbhopuykpcibc57bou263i.py
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# weight => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.7071067811865476), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.7071067811865476
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
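# Note (illustrative): the constant 0.7071067811865476 above is sqrt(2 / fan_in)
# with fan_in = 4, i.e. the EqualLR scaling sqrt(2 / (in_dim * kernel_numel))
# folded into the kernel as a compile-time constant.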
# kernel path: runs/run_shard_4/inductor_cache/jo/cjo3wxmtawsvu7opemz2xwvsknw4nxv74xivifhgb7csue6qqjbi.py
# Topologically Sorted Source Nodes: [out, mul_1, out_1], Original ATen: [aten._native_batch_norm_legit, aten.mul, aten.add]
# Source node to ATen node mapping:
# mul_1 => mul_2
# out => add, rsqrt, var_mean
# out_1 => add_1
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %view_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %getitem_1), kwargs = {})
triton_per_fused__native_batch_norm_legit_add_mul_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_mul_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp22 = tl.load(in_ptr1 + (x2 + (8*x3)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (4 + x2 + (8*x3)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp24 = tmp22 + tmp23
tmp25 = tmp0 - tmp10
tmp26 = tmp25 * tmp21
tmp27 = tmp24 * tmp26
tmp30 = tmp28 + tmp29
tmp31 = tmp27 + tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp21, xmask)
tl.store(out_ptr1 + (r1 + (16*x0)), tmp31, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
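# Eager-mode reference for the fused kernel above (illustrative only;
# `_adain_reference` is an assumed helper, not generated code). For each
# (batch, channel) slice it normalizes over the 4x4 spatial plane using a
# biased variance and eps = 1e-05, then applies the style-derived gamma/beta.
def _adain_reference(x, gamma, beta, eps=1e-05):
    mean = x.mean(dim=(2, 3), keepdim=True)
    var = x.var(dim=(2, 3), unbiased=False, keepdim=True)
    return gamma * (x - mean) * torch.rsqrt(var + eps) + beta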
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_3, reinterpret_tensor(buf0, (4, 8), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf5 = reinterpret_tensor(buf3, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf3 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out, mul_1, out_1], Original ATen: [aten._native_batch_norm_legit, aten.mul, aten.add]
triton_per_fused__native_batch_norm_legit_add_mul_1.run(buf5, primals_4, buf1, primals_2, buf2, buf6, 16, 16, grid=grid(16), stream=stream0)
del buf1
del primals_2
return (buf6, buf0, primals_3, primals_4, buf2, buf5, )
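# Data-flow summary for `call` (illustrative): buf0 is the EqualLR-scaled style
# weight, buf1 = style @ buf0.T gives one width-8 (gamma|beta) row per sample,
# and the fused kernel adds the style bias, reading columns 0-3 (+ bias[:4]) as
# gamma and columns 4-7 (+ bias[4:]) as beta while normalizing each
# (batch, channel) plane of the input.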
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from math import sqrt
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
linear = nn.Linear(in_dim, out_dim)
linear.weight.data.normal_()
linear.bias.data.zero_()
self.linear = equal_lr(linear)
def forward(self, input):
return self.linear(input)
class AdaptiveInstanceNorm(nn.Module):
def __init__(self, in_channel, style_dim):
super().__init__()
self.norm = nn.InstanceNorm2d(in_channel)
self.style = EqualLinear(style_dim, in_channel * 2)
self.style.linear.bias.data[:in_channel] = 1
self.style.linear.bias.data[in_channel:] = 0
def forward(self, input, style):
style = self.style(style).unsqueeze(2).unsqueeze(3)
gamma, beta = style.chunk(2, 1)
out = self.norm(input)
out = gamma * out + beta
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'style_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from math import sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.7071067811865476
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_mul_1(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp22 = tl.load(in_ptr1 + (x2 + 8 * x3), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (4 + x2 + 8 * x3), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp24 = tmp22 + tmp23
tmp25 = tmp0 - tmp10
tmp26 = tmp25 * tmp21
tmp27 = tmp24 * tmp26
tmp30 = tmp28 + tmp29
tmp31 = tmp27 + tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 16 * x0), tmp31, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
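# The reduction kernel above fuses instance-norm statistics with the AdaIN
# affine: per (batch, channel) it computes the spatial mean, rsqrt(var + 1e-5),
# and then (gamma + bias_g) * normalized + (beta + bias_b) in a single pass.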
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(32)](primals_1, buf0, 32, XBLOCK=32,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(buf0, (4, 8), (1, 4
), 0), out=buf1)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf5 = reinterpret_tensor(buf3, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf3
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused__native_batch_norm_legit_add_mul_1[grid(16)](buf5,
primals_4, buf1, primals_2, buf2, buf6, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del buf1
del primals_2
return buf6, buf0, primals_3, primals_4, buf2, buf5
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
linear = nn.Linear(in_dim, out_dim)
linear.weight.data.normal_()
linear.bias.data.zero_()
self.linear = equal_lr(linear)
def forward(self, input):
return self.linear(input)
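# A minimal sanity check of the equalized-LR mechanics (illustrative; the
# helper name below is hypothetical and not part of the original module):
def _equal_lr_sanity_check():
    lin = EqualLinear(in_dim=4, out_dim=8).linear
    lin(torch.zeros(1, 4))  # the forward pre-hook materializes lin.weight
    fan_in = lin.weight_orig.size(1)
    assert torch.allclose(lin.weight, lin.weight_orig * sqrt(2 / fan_in))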
class AdaptiveInstanceNormNew(nn.Module):
def __init__(self, in_channel, style_dim):
super().__init__()
self.norm = nn.InstanceNorm2d(in_channel)
self.style = EqualLinear(style_dim, in_channel * 2)
self.style.linear.bias.data[:in_channel] = 1
self.style.linear.bias.data[in_channel:] = 0
def forward(self, input_0, input_1):
primals_2 = self.style.linear.bias
primals_1 = self.style.linear.weight_orig
primals_4 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| mmhnoaccount/DeepChroma_128 | AdaptiveInstanceNorm | false | 7,262 | [
"MIT"
] | 1 | 337ec961bfc4ee44f48cb84e624c293ee2805b62 | https://github.com/mmhnoaccount/DeepChroma_128/tree/337ec961bfc4ee44f48cb84e624c293ee2805b62 | import torch
import torch.nn as nn
from math import sqrt
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
linear = nn.Linear(in_dim, out_dim)
linear.weight.data.normal_()
linear.bias.data.zero_()
self.linear = equal_lr(linear)
def forward(self, input):
return self.linear(input)
class Model(nn.Module):
def __init__(self, in_channel, style_dim):
super().__init__()
self.norm = nn.InstanceNorm2d(in_channel)
self.style = EqualLinear(style_dim, in_channel * 2)
self.style.linear.bias.data[:in_channel] = 1
self.style.linear.bias.data[in_channel:] = 0
def forward(self, input, style):
style = self.style(style).unsqueeze(2).unsqueeze(3)
gamma, beta = style.chunk(2, 1)
out = self.norm(input)
out = gamma * out + beta
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
cnn_4layer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/c3/cc3viy35ukuam57kedmccz7bf2yw3dvtjy2isdmexojnafyusphq.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 8
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/du/cdutqb4yatzkfvs63awjxej4mad3qwpiqzj32yeixxljxyqth7fk.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2b/c2bujjyeji7nhf4gfgxav4unhmpugynzwx2v63uhk7lp4nn5exsa.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_3 => relu_2
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (8, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 8, 4, 4), (128, 16, 4, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (256, 16), (16, 1))
assert_size_stride(primals_7, (256, ), (1, ))
assert_size_stride(primals_8, (10, 256), (256, 1))
assert_size_stride(primals_9, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 2, 2), (32, 4, 2, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 128, grid=grid(128), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 1, 1), (16, 1, 1, 1))
buf3 = reinterpret_tensor(buf2, (4, 16, 1, 1), (16, 1, 64, 64), 0); del buf2 # reuse
buf7 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_1.run(buf3, primals_5, buf7, 64, grid=grid(64), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (4, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 256), (1, 16), 0), out=buf4)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
triton_poi_fused_relu_2.run(buf5, primals_7, 1024, grid=grid(1024), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8, (256, 10), (1, 256), 0), alpha=1, beta=1, out=buf6)
del primals_9
return (buf6, primals_1, primals_3, primals_4, buf1, reinterpret_tensor(buf3, (4, 16), (16, 1), 0), buf5, primals_8, primals_6, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 8, 4, 4), (128, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((10, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class cnn_4layer(nn.Module):
def __init__(self, in_ch, in_dim, width=2, linear_size=256):
super(cnn_4layer, self).__init__()
self.conv1 = nn.Conv2d(in_ch, 4 * width, 4, stride=2, padding=1)
self.conv2 = nn.Conv2d(4 * width, 8 * width, 4, stride=2, padding=1)
self.fc1 = nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4),
linear_size)
self.fc2 = nn.Linear(linear_size, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
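# Shape trace for the default test config below (in_ch=4, in_dim=4, width=2):
#   (4, 4, 4, 4) --conv1 s2--> (4, 8, 2, 2) --conv2 s2--> (4, 16, 1, 1)
#   --flatten--> (4, 16) --fc1--> (4, 256) --fc2--> (4, 10)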
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_ch': 4, 'in_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 8
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
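# Besides the ReLU output, the kernel above also stores the boolean mask
# (activation <= 0) that aten.threshold_backward reads during autograd.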
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (8, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 8, 4, 4), (128, 16, 4, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (256, 16), (16, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (10, 256), (256, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 2, 2), (32, 4, 2, 1))
buf1 = buf0
del buf0
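        # buf0 is consumed in place: the fused kernel below adds the conv bias
        # and applies ReLU directly on the convolution output (no extra copy).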
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(128)](buf1, primals_2, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 1, 1), (16, 1, 1, 1))
buf3 = reinterpret_tensor(buf2, (4, 16, 1, 1), (16, 1, 64, 64), 0)
del buf2
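        # Zero-copy restride of buf2; the trailing size-1 dims make the (64,
        # 64) strides harmless, and the same storage is later viewed flat as
        # (4, 16) for the matmul below.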
buf7 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 1, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_1[grid(64)](buf3,
primals_5, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (4, 16), (16, 1), 0),
reinterpret_tensor(primals_6, (16, 256), (1, 16), 0), out=buf4)
buf5 = buf4
del buf4
triton_poi_fused_relu_2[grid(1024)](buf5, primals_7, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8,
(256, 10), (1, 256), 0), alpha=1, beta=1, out=buf6)
del primals_9
return buf6, primals_1, primals_3, primals_4, buf1, reinterpret_tensor(buf3
, (4, 16), (16, 1), 0), buf5, primals_8, primals_6, buf7
class cnn_4layerNew(nn.Module):
def __init__(self, in_ch, in_dim, width=2, linear_size=256):
super(cnn_4layerNew, self).__init__()
self.conv1 = nn.Conv2d(in_ch, 4 * width, 4, stride=2, padding=1)
self.conv2 = nn.Conv2d(4 * width, 8 * width, 4, stride=2, padding=1)
self.fc1 = nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4),
linear_size)
self.fc2 = nn.Linear(linear_size, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| mnmueller/auto_LiRPA | cnn_4layer | false | 7,263 | [
"BSD-3-Clause"
] | 1 | 55cb270b0b99f07b74541d55706c69fbb9daff66 | https://github.com/mnmueller/auto_LiRPA/tree/55cb270b0b99f07b74541d55706c69fbb9daff66 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_ch, in_dim, width=2, linear_size=256):
super().__init__()
self.conv1 = nn.Conv2d(in_ch, 4 * width, 4, stride=2, padding=1)
self.conv2 = nn.Conv2d(4 * width, 8 * width, 4, stride=2, padding=1)
self.fc1 = nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4),
linear_size)
self.fc2 = nn.Linear(linear_size, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
cnn_4layer_LeakyRelu | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/tw/ctw4qtev5k5k65prtdigvjqeacfheyl32x6ntynwnhcj6evtqfp5.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.1), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 8
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/rp/crpq6ay3bx2w6hni4wf7fm3xqw7kh3vra5vqoxiop4mru27uubam.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.1), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
triton_poi_fused_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/z6/cz626pfdzck55lqwlwxmyjjebzy7psusnufojdzlpqnp7y7aa65h.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_3 => gt_2, mul_2, where_2
# Graph fragment:
# %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {})
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor, 0.1), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %add_tensor, %mul_2), kwargs = {})
triton_poi_fused_leaky_relu_2 = async_compile.triton('triton_poi_fused_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (8, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 8, 4, 4), (128, 16, 4, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (256, 16), (16, 1))
assert_size_stride(primals_7, (256, ), (1, ))
assert_size_stride(primals_8, (10, 256), (256, 1))
assert_size_stride(primals_9, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 2, 2), (32, 4, 2, 1))
buf1 = empty_strided_cuda((4, 8, 2, 2), (32, 4, 2, 1), torch.bool)
buf2 = empty_strided_cuda((4, 8, 2, 2), (32, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 128, grid=grid(128), stream=stream0)
del buf0
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 16, 1, 1), (16, 1, 1, 1))
buf4 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 1, 1), torch.bool)
buf5 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_1.run(buf3, primals_5, buf4, buf5, 64, grid=grid(64), stream=stream0)
del buf3
del primals_5
buf6 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf5, (4, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 256), (1, 16), 0), out=buf6)
buf7 = empty_strided_cuda((4, 256), (256, 1), torch.bool)
buf8 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_2.run(buf6, primals_7, buf7, buf8, 1024, grid=grid(1024), stream=stream0)
del buf6
del primals_7
buf9 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf8, reinterpret_tensor(primals_8, (256, 10), (1, 256), 0), alpha=1, beta=1, out=buf9)
del primals_9
return (buf9, primals_1, primals_3, primals_4, buf1, buf2, buf4, reinterpret_tensor(buf5, (4, 16), (16, 1), 0), buf7, buf8, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 8, 4, 4), (128, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((10, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class cnn_4layer_LeakyRelu(nn.Module):
def __init__(self, in_ch, in_dim, width=2, linear_size=256, alpha=0.1):
super(cnn_4layer_LeakyRelu, self).__init__()
self.conv1 = nn.Conv2d(in_ch, 4 * width, 4, stride=2, padding=1)
self.conv2 = nn.Conv2d(4 * width, 8 * width, 4, stride=2, padding=1)
self.fc1 = nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4),
linear_size)
self.fc2 = nn.Linear(linear_size, 10)
self.alpha = alpha
def forward(self, x):
x = F.leaky_relu(self.conv1(x), self.alpha)
x = F.leaky_relu(self.conv2(x), self.alpha)
x = x.view(x.size(0), -1)
x = F.leaky_relu(self.fc1(x), self.alpha)
x = self.fc2(x)
return x
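# LeakyReLU computes where(x > 0, x, alpha * x); with the default alpha = 0.1
# used here, the compiled Triton kernels hard-code the 0.1 slope as a constant.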
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_ch': 4, 'in_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 8
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
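# Each fused leaky-relu kernel above writes two tensors: the sign mask
# (x > 0), saved for the backward pass, and where(x > 0, x, 0.1 * x).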
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (8, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 8, 4, 4), (128, 16, 4, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (256, 16), (16, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (10, 256), (256, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 2, 2), (32, 4, 2, 1))
buf1 = empty_strided_cuda((4, 8, 2, 2), (32, 4, 2, 1), torch.bool)
buf2 = empty_strided_cuda((4, 8, 2, 2), (32, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(128)](buf0,
primals_2, buf1, buf2, 128, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 16, 1, 1), (16, 1, 1, 1))
buf4 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 1, 1), torch.bool)
buf5 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.float32
)
triton_poi_fused_convolution_leaky_relu_1[grid(64)](buf3, primals_5,
buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf3
del primals_5
buf6 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (4, 16), (16, 1), 0),
reinterpret_tensor(primals_6, (16, 256), (1, 16), 0), out=buf6)
buf7 = empty_strided_cuda((4, 256), (256, 1), torch.bool)
buf8 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
triton_poi_fused_leaky_relu_2[grid(1024)](buf6, primals_7, buf7,
buf8, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del buf6
del primals_7
buf9 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf8, reinterpret_tensor(primals_8,
(256, 10), (1, 256), 0), alpha=1, beta=1, out=buf9)
del primals_9
return (buf9, primals_1, primals_3, primals_4, buf1, buf2, buf4,
reinterpret_tensor(buf5, (4, 16), (16, 1), 0), buf7, buf8,
primals_8, primals_6)
class cnn_4layer_LeakyReluNew(nn.Module):
def __init__(self, in_ch, in_dim, width=2, linear_size=256, alpha=0.1):
super(cnn_4layer_LeakyReluNew, self).__init__()
self.conv1 = nn.Conv2d(in_ch, 4 * width, 4, stride=2, padding=1)
self.conv2 = nn.Conv2d(4 * width, 8 * width, 4, stride=2, padding=1)
self.fc1 = nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4),
linear_size)
self.fc2 = nn.Linear(linear_size, 10)
self.alpha = alpha
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| mnmueller/auto_LiRPA | cnn_4layer_LeakyRelu | false | 7,264 | [
"BSD-3-Clause"
] | 1 | 55cb270b0b99f07b74541d55706c69fbb9daff66 | https://github.com/mnmueller/auto_LiRPA/tree/55cb270b0b99f07b74541d55706c69fbb9daff66 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_ch, in_dim, width=2, linear_size=256, alpha=0.1):
super().__init__()
self.conv1 = nn.Conv2d(in_ch, 4 * width, 4, stride=2, padding=1)
self.conv2 = nn.Conv2d(4 * width, 8 * width, 4, stride=2, padding=1)
self.fc1 = nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4),
linear_size)
self.fc2 = nn.Linear(linear_size, 10)
self.alpha = alpha
def forward(self, x):
x = F.leaky_relu(self.conv1(x), self.alpha)
x = F.leaky_relu(self.conv2(x), self.alpha)
x = x.view(x.size(0), -1)
x = F.leaky_relu(self.fc1(x), self.alpha)
x = self.fc2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Net2 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ix/cixxyusyg44s2hkoufcgbrv3ix5ookwqjl4ia3xkv7bdqi4yrzus.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = (xindex // 1600)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/op/coptu6xep3awc4lajb4xivopppqmjtx3zy7ebtazm45rqvyeknds.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_3 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = (xindex // 1200)
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + (1216*x2)), tmp4, xmask)
tl.store(out_ptr1 + (x3 + (1280*x2)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/as/casrc7bf7ghsendgi7tkqxk3hj4ic6aqb4rmkxzuk5dhbidznia7.py
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.relu, aten.view]
# Source node to ATen node mapping:
# out_3 => relu_1
# out_4 => view_4
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %view_4 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%relu_1, [64, 300]), kwargs = {})
triton_poi_fused_relu_view_2 = async_compile.triton('triton_poi_fused_relu_view_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_view_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = (xindex // 300)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (300*(x1 % 4)) + (1216*(x1 // 4))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
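# This copy kernel regathers the padded (stride-1216) ReLU output into the
# contiguous (64, 300) layout expected by the following addmm.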
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (400, 4), (4, 1))
assert_size_stride(primals_2, (400, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (300, 400), (400, 1))
assert_size_stride(primals_5, (300, ), (1, ))
assert_size_stride(primals_6, (1, 300), (300, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0); del buf0 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 25600, grid=grid(25600), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0), reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_5, buf3, buf7, 19200, grid=grid(19200), stream=stream0)
del primals_5
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.relu, aten.view]
triton_poi_fused_relu_view_2.run(buf3, buf4, 19200, grid=grid(19200), stream=stream0)
del buf3
buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(primals_6, (300, 1), (1, 300), 0), alpha=1, beta=1, out=buf6)
del primals_7
return (reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 400), (400, 1), 0), buf4, primals_6, buf7, primals_4, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((400, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((300, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 300), (300, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class Net2(nn.Module):
"""
Net2 is a more complex network consisting of two hidden layers with 400
and 300 neurons
"""
hidden1 = 400
hidden2 = 300
def __init__(self, input_size):
super(Net2, self).__init__()
self.fc1 = nn.Linear(input_size, self.hidden1)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(self.hidden1, self.hidden2)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(self.hidden2, 1)
def forward(self, x):
out = self.fc1(x)
out = self.relu1(out)
out = self.fc2(out)
out = self.relu2(out)
out = self.fc3(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4}]
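# A minimal smoke test (editorial, not part of the original module): each
# nn.Linear acts on the last dimension, so a [4, 4, 4, 4] input maps through
# the 400- and 300-unit hidden layers to a [4, 4, 4, 1] output.
if __name__ == "__main__":
    model = Net2(input_size=4)
    x, = get_inputs()
    assert model(x).shape == (4, 4, 4, 1)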
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
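# Editorial note: the kernel above fuses the fc1 bias add with ReLU in place
# and stores the (activation <= 0) mask consumed by aten.threshold_backward;
# the mask rows are padded (stride 1664 vs. 1600 valid elements) for
# alignment. The next kernel does the same for fc2, writing into fresh
# padded output buffers instead of updating in place.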
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = xindex // 1200
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = xindex // 300
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (400, 4), (4, 1))
assert_size_stride(primals_2, (400,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (300, 400), (400, 1))
assert_size_stride(primals_5, (300,), (1,))
assert_size_stride(primals_6, (1, 300), (300, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(25600)](buf1,
primals_2, buf8, 25600, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1),
torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(19200)](buf2,
primals_5, buf3, buf7, 19200, XBLOCK=256, num_warps=4, num_stages=1
)
del primals_5
buf4 = buf2
del buf2
triton_poi_fused_relu_view_2[grid(19200)](buf3, buf4, 19200, XBLOCK
=128, num_warps=4, num_stages=1)
del buf3
buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(primals_6,
(300, 1), (1, 300), 0), alpha=1, beta=1, out=buf6)
del primals_7
return reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 400), (400, 1), 0
), buf4, primals_6, buf7, primals_4, buf8
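# Editorial note: call() returns the network output first; the remaining
# tensors (flattened input, post-ReLU activations, fc weights, and the two
# boolean ReLU masks) are kept alive for the partitioned backward graph.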
class Net2New(nn.Module):
"""
Net2 is a more complex network consisting of two hidden layers with 400
and 300 neurons
"""
hidden1 = 400
hidden2 = 300
def __init__(self, input_size):
super(Net2New, self).__init__()
self.fc1 = nn.Linear(input_size, self.hidden1)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(self.hidden1, self.hidden2)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(self.hidden2, 1)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
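# A hedged sanity check (editorial, not part of the generated file): rebuild
# the eager forward from Net2New's own parameters and compare it against the
# fused-kernel path on CUDA.
def _check_net2new_matches_eager():
    model = Net2New(4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = nn.functional.linear(x, model.fc1.weight, model.fc1.bias).relu()
    ref = nn.functional.linear(ref, model.fc2.weight, model.fc2.bias).relu()
    ref = nn.functional.linear(ref, model.fc3.weight, model.fc3.bias)
    torch.testing.assert_close(model(x), ref, rtol=1e-4, atol=1e-4)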
| moritzschaefer/pavooc | Net2 | false | 7,265 | [
"MIT"
] | 1 | 735f5455f9a95a5734436a24e2aa92cf600c91af | https://github.com/moritzschaefer/pavooc/tree/735f5455f9a95a5734436a24e2aa92cf600c91af | import torch
from torch import nn
class Model(nn.Module):
"""
Net2 is a more complex network consisting of two hidden layers with 400
and 300 neurons
"""
hidden1 = 400
hidden2 = 300
def __init__(self, input_size):
super().__init__()
self.fc1 = nn.Linear(input_size, self.hidden1)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(self.hidden1, self.hidden2)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(self.hidden2, 1)
def forward(self, x):
out = self.fc1(x)
out = self.relu1(out)
out = self.fc2(out)
out = self.relu2(out)
out = self.fc3(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Debugnetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ej/cejfrwnzxinkchwn6symdb72fdtj7gix5hy2vuswodhbeh45mrae.py
# Topologically Sorted Source Nodes: [output, output_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# output => convolution
# output_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
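# Editorial note: the convolutions themselves run through cuDNN via
# extern_kernels.convolution; this Triton kernel only fuses the per-channel
# bias add with ReLU, updating the convolution output in place.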
# kernel path: runs/run_shard_4/inductor_cache/7z/c7zuih2ysjtir5rh5seep5ijnhokjlgkyjw2edhf257ahvz4iipr.py
# Topologically Sorted Source Nodes: [output_4], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# output_4 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
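# Editorial note: this kernel performs 2x2/stride-2 max pooling and also
# records an int8 code in {0, 1, 2, 3} per output element, identifying which
# window position held the maximum so the pooling backward pass can route
# gradients without recomparing.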
# kernel path: runs/run_shard_4/inductor_cache/xq/cxqz2dr7nh2qabrtemj52pazmhrknj5ltcy32ka252ia6a3jgpqi.py
# Topologically Sorted Source Nodes: [output_5, output_6], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# output_5 => convolution_2
# output_6 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/pr/cpri5daxkfbmt5ostbhb5o2avircr64a2rmdkxfackaxyjfc7owe.py
# Topologically Sorted Source Nodes: [output_9], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# output_9 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/of/cof37d5wbqzvtkioj7k4me7wqpvfv55rs62ytonj7gij2o3abnod.py
# Topologically Sorted Source Nodes: [output_10, output_11], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# output_10 => convolution_4
# output_11 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/mn/cmnzsv2cdbsuq2sygridqvwumzmcvknuthlumel5m25l2ajsr4ft.py
# Topologically Sorted Source Nodes: [output_18], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# output_18 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ic/cicsjqc3cfcjzqlztx4hz7ssqwe47ngo3g2onc6463k3vgfmt5cw.py
# Topologically Sorted Source Nodes: [output_19, output_20], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# output_19 => convolution_8
# output_20 => relu_8
# Graph fragment:
# %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_18, %primals_19, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_8 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_8,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 512
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/rs/crsb2j7t6kjc2dizrgavde3h3rerob3nhf7iqux6o24562lkvvoe.py
# Topologically Sorted Source Nodes: [output_23, output_24], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# output_23 => convolution_10
# output_24 => relu_10
# Graph fragment:
# %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_9, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_10 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_10,), kwargs = {})
triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/kh/ckh2fqykduc5vzc66z3dzxnttv5ucnf27xwzzpqkv57775qdbypn.py
# Topologically Sorted Source Nodes: [output_25, output_26], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# output_25 => convolution_11
# output_26 => relu_11
# Graph fragment:
# %convolution_11 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_10, %primals_24, %primals_25, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_11 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_11,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_11, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_8 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_8(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
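# Editorial note: unlike the earlier fused kernels, this one also writes the
# (activation <= 0) boolean mask for aten.threshold_backward, presumably
# because buf29 is handed back to the caller and cannot be relied on for
# recomputing the ReLU condition in the backward pass.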
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256, ), (1, ))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256, ), (1, ))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256, ), (1, ))
assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (256, ), (1, ))
assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_19, (512, ), (1, ))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512, ), (1, ))
assert_size_stride(primals_22, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (256, ), (1, ))
assert_size_stride(primals_24, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_25, (128, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [output, output_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 1048576, grid=grid(1048576), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [output_2, output_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 1048576, grid=grid(1048576), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32)
buf5 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [output_4], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf3, buf4, buf5, 262144, grid=grid(262144), stream=stream0)
# Topologically Sorted Source Nodes: [output_5], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [output_5, output_6], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf7, primals_7, 524288, grid=grid(524288), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [output_7], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [output_7, output_8], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf9, primals_9, 524288, grid=grid(524288), stream=stream0)
del primals_9
buf10 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32)
buf11 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.int8)
# Topologically Sorted Source Nodes: [output_9], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf9, buf10, buf11, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [output_10], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 16, 16), (65536, 256, 16, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [output_10, output_11], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf13, primals_11, 262144, grid=grid(262144), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [output_12], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 256, 16, 16), (65536, 256, 16, 1))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [output_12, output_13], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf15, primals_13, 262144, grid=grid(262144), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [output_14], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 256, 16, 16), (65536, 256, 16, 1))
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [output_14, output_15], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf17, primals_15, 262144, grid=grid(262144), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [output_16], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 256, 16, 16), (65536, 256, 16, 1))
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [output_16, output_17], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf19, primals_17, 262144, grid=grid(262144), stream=stream0)
del primals_17
buf20 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.float32)
buf21 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.int8)
# Topologically Sorted Source Nodes: [output_18], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf19, buf20, buf21, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [output_19], Original ATen: [aten.convolution]
buf22 = extern_kernels.convolution(buf20, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 512, 8, 8), (32768, 64, 8, 1))
buf23 = buf22; del buf22 # reuse
# Topologically Sorted Source Nodes: [output_19, output_20], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf23, primals_19, 131072, grid=grid(131072), stream=stream0)
del primals_19
# Topologically Sorted Source Nodes: [output_21], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 512, 8, 8), (32768, 64, 8, 1))
buf25 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [output_21, output_22], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf25, primals_21, 131072, grid=grid(131072), stream=stream0)
del primals_21
# Topologically Sorted Source Nodes: [output_23], Original ATen: [aten.convolution]
buf26 = extern_kernels.convolution(buf25, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 8, 8), (16384, 64, 8, 1))
buf27 = buf26; del buf26 # reuse
# Topologically Sorted Source Nodes: [output_23, output_24], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_7.run(buf27, primals_23, 65536, grid=grid(65536), stream=stream0)
del primals_23
# Topologically Sorted Source Nodes: [output_25], Original ATen: [aten.convolution]
buf28 = extern_kernels.convolution(buf27, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 128, 8, 8), (8192, 64, 8, 1))
buf29 = buf28; del buf28 # reuse
buf30 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [output_25, output_26], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_8.run(buf29, primals_25, buf30, 32768, grid=grid(32768), stream=stream0)
del primals_25
return (buf29, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf17, buf19, buf20, buf21, buf23, buf25, buf27, buf30, )
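# Editorial note: buf29 is the network output; every other tensor in the
# return tuple (weights, input, intermediate activations, pooling index maps,
# and the final ReLU mask) is retained for the backward graph.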
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
from torch.nn import init
class conv(nn.Module):
"""
    n x n convolution followed by an in-place ReLU
"""
    def __init__(self, in_dim, out_dim, kernel_size, stride, padding):
super(conv, self).__init__()
        self.con_layer = nn.Conv2d(in_dim, out_dim, kernel_size, stride,
padding)
self.relu = nn.ReLU(inplace=True)
self.initi()
def forward(self, input_):
output = self.con_layer(input_)
output = self.relu(output)
return output
def initi(self):
init.normal_(self.con_layer.weight, std=0.01)
if self.con_layer.bias is not None:
init.constant_(self.con_layer.bias, 0.0)
class VGG_19(nn.Module):
"""
    First 10 layers of VGG-19, followed by layers 11 and 12 as added by CMU
    (the OpenPose feature extractor)
"""
def __init__(self, input_dim):
super(VGG_19, self).__init__()
self.conv1_1 = conv(input_dim, 64, 3, 1, 1)
self.conv1_2 = conv(64, 64, 3, 1, 1)
self.pooling_1 = nn.MaxPool2d(2, 2, 0)
self.conv2_1 = conv(64, 128, 3, 1, 1)
self.conv2_2 = conv(128, 128, 3, 1, 1)
self.pooling_2 = nn.MaxPool2d(2, 2, 0)
self.conv3_1 = conv(128, 256, 3, 1, 1)
self.conv3_2 = conv(256, 256, 3, 1, 1)
self.conv3_3 = conv(256, 256, 3, 1, 1)
self.conv3_4 = conv(256, 256, 3, 1, 1)
self.pooling_3 = nn.MaxPool2d(2, 2, 0)
self.conv4_1 = conv(256, 512, 3, 1, 1)
self.conv4_2 = conv(512, 512, 3, 1, 1)
self.conv4_3 = conv(512, 256, 3, 1, 1)
self.conv4_4 = conv(256, 128, 3, 1, 1)
def forward(self, input_):
output = self.conv1_1(input_)
output = self.conv1_2(output)
output = self.pooling_1(output)
output = self.conv2_1(output)
output = self.conv2_2(output)
output = self.pooling_2(output)
output = self.conv3_1(output)
output = self.conv3_2(output)
output = self.conv3_3(output)
output = self.conv3_4(output)
output = self.pooling_3(output)
output = self.conv4_1(output)
output = self.conv4_2(output)
output = self.conv4_3(output)
output = self.conv4_4(output)
return output
class Debugnetwork(nn.Module):
"""
"""
def __init__(self, args):
super(Debugnetwork, self).__init__()
self.block_0 = VGG_19(3)
def forward(self, input_):
output = self.block_0(input_)
return output
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'args': _mock_config()}]
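# A minimal smoke test (editorial, assumed usage): three 2x2 max-pools reduce
# the 64x64 input to 8x8, and conv4_4 projects down to 128 channels.
if __name__ == "__main__":
    net = Debugnetwork(_mock_config())
    x, = get_inputs()
    assert net(x).shape == (4, 128, 8, 8)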
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.nn import init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
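# Editorial note: xnumel (1048576) is a power of two, so every candidate
# XBLOCK divides it exactly and the bounds mask degenerates to all-True; the
# bare tl.full([XBLOCK], True, tl.int1) line is the residue of that elided
# mask, which is why all loads and stores below run unmasked.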
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_8(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, None)
tl.store(out_ptr0 + x3, tmp6, None)
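# Besides the fused bias-add + ReLU, this kernel also emits the boolean mask
# (out <= 0) that ReLU's backward needs to zero gradients -- the
# "threshold_backward" part of its name. Eager sketch:
def _bias_relu_with_backward_mask(y, bias):
    out = torch.relu(y + bias.view(1, -1, 1, 1))  # matches tmp4
    mask = out <= 0                               # matches tmp6
    return out, mask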
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25) = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256,), (1,))
assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (256,), (1,))
assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_19, (512,), (1,))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512,), (1,))
assert_size_stride(primals_22, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (256,), (1,))
assert_size_stride(primals_24, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_25, (128,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(1048576)](buf3, primals_5,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.float32)
buf5 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(262144)](buf3, buf4,
buf5, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_2[grid(524288)](buf7, primals_7,
524288, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_2[grid(524288)](buf9, primals_9,
524288, XBLOCK=512, num_warps=8, num_stages=1)
del primals_9
buf10 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.float32)
buf11 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(131072)](buf9,
buf10, buf11, 131072, XBLOCK=512, num_warps=8, num_stages=1)
buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 16, 16), (65536, 256, 16, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_4[grid(262144)](buf13, primals_11,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 256, 16, 16), (65536, 256, 16, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_4[grid(262144)](buf15, primals_13,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 256, 16, 16), (65536, 256, 16, 1))
buf17 = buf16
del buf16
triton_poi_fused_convolution_relu_4[grid(262144)](buf17, primals_15,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_15
buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 256, 16, 16), (65536, 256, 16, 1))
buf19 = buf18
del buf18
triton_poi_fused_convolution_relu_4[grid(262144)](buf19, primals_17,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_17
        buf20 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1),
            torch.float32)
        buf21 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1),
            torch.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(65536)](buf19,
buf20, buf21, 65536, XBLOCK=256, num_warps=4, num_stages=1)
buf22 = extern_kernels.convolution(buf20, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 512, 8, 8), (32768, 64, 8, 1))
buf23 = buf22
del buf22
triton_poi_fused_convolution_relu_6[grid(131072)](buf23, primals_19,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_19
buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 512, 8, 8), (32768, 64, 8, 1))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_6[grid(131072)](buf25, primals_21,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_21
buf26 = extern_kernels.convolution(buf25, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 8, 8), (16384, 64, 8, 1))
buf27 = buf26
del buf26
triton_poi_fused_convolution_relu_7[grid(65536)](buf27, primals_23,
65536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_23
buf28 = extern_kernels.convolution(buf27, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 128, 8, 8), (8192, 64, 8, 1))
buf29 = buf28
del buf28
        buf30 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1),
            torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_8[grid(32768)](
buf29, primals_25, buf30, 32768, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_25
return (buf29, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, buf1, buf3, buf4, buf5, buf7,
buf9, buf10, buf11, buf13, buf15, buf17, buf19, buf20, buf21, buf23,
buf25, buf27, buf30)
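# call() returns the user-facing output (buf29) first; everything after it is
# weights, activations and pool indices kept alive for a potential backward
# pass. Consumption sketch (an assumption made explicit here -- the forward()
# below likewise keeps only element 0):
def _run_compiled(flat_args):
    out, *saved_for_backward = call(flat_args)  # note: call() clears flat_args
    return out  # (4, 128, 8, 8) per the assert_size_stride on buf28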
class conv(nn.Module):
"""
    n x n convolution followed by an in-place ReLU.
"""
def __init__(self, in_dim, out_dim, kernal_size, stride, padding):
super(conv, self).__init__()
self.con_layer = nn.Conv2d(in_dim, out_dim, kernal_size, stride,
padding)
self.relu = nn.ReLU(inplace=True)
self.initi()
def forward(self, input_):
output = self.con_layer(input_)
output = self.relu(output)
return output
def initi(self):
init.normal_(self.con_layer.weight, std=0.01)
if self.con_layer.bias is not None:
init.constant_(self.con_layer.bias, 0.0)
class VGG_19(nn.Module):
"""
    First 10 layers of VGG-19; layers 11 and 12 follow the CMU design.
"""
def __init__(self, input_dim):
super(VGG_19, self).__init__()
self.conv1_1 = conv(input_dim, 64, 3, 1, 1)
self.conv1_2 = conv(64, 64, 3, 1, 1)
self.pooling_1 = nn.MaxPool2d(2, 2, 0)
self.conv2_1 = conv(64, 128, 3, 1, 1)
self.conv2_2 = conv(128, 128, 3, 1, 1)
self.pooling_2 = nn.MaxPool2d(2, 2, 0)
self.conv3_1 = conv(128, 256, 3, 1, 1)
self.conv3_2 = conv(256, 256, 3, 1, 1)
self.conv3_3 = conv(256, 256, 3, 1, 1)
self.conv3_4 = conv(256, 256, 3, 1, 1)
self.pooling_3 = nn.MaxPool2d(2, 2, 0)
self.conv4_1 = conv(256, 512, 3, 1, 1)
self.conv4_2 = conv(512, 512, 3, 1, 1)
self.conv4_3 = conv(512, 256, 3, 1, 1)
self.conv4_4 = conv(256, 128, 3, 1, 1)
def forward(self, input_):
output = self.conv1_1(input_)
output = self.conv1_2(output)
output = self.pooling_1(output)
output = self.conv2_1(output)
output = self.conv2_2(output)
output = self.pooling_2(output)
output = self.conv3_1(output)
output = self.conv3_2(output)
output = self.conv3_3(output)
output = self.conv3_4(output)
output = self.pooling_3(output)
output = self.conv4_1(output)
output = self.conv4_2(output)
output = self.conv4_3(output)
output = self.conv4_4(output)
return output
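# Shape sanity-check sketch for the backbone above: the conv layers preserve
# H x W (3x3, stride 1, pad 1) and each MaxPool2d(2, 2) halves them, so a
# 64x64 input leaves with 8x8 spatial size and 128 channels -- matching the
# assert_size_stride guards in call().
def _vgg19_shape_check():
    net = VGG_19(3)
    y = net(torch.rand(4, 3, 64, 64))
    assert y.shape == (4, 128, 8, 8)  # 64 -> 32 -> 16 -> 8 after three pools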
class DebugnetworkNew(nn.Module):
"""
"""
def __init__(self, args):
super(DebugnetworkNew, self).__init__()
self.block_0 = VGG_19(3)
def forward(self, input_0):
primals_1 = self.block_0.conv1_1.con_layer.weight
primals_2 = self.block_0.conv1_1.con_layer.bias
primals_4 = self.block_0.conv1_2.con_layer.weight
primals_5 = self.block_0.conv1_2.con_layer.bias
primals_6 = self.block_0.conv2_1.con_layer.weight
primals_7 = self.block_0.conv2_1.con_layer.bias
primals_8 = self.block_0.conv2_2.con_layer.weight
primals_9 = self.block_0.conv2_2.con_layer.bias
primals_10 = self.block_0.conv3_1.con_layer.weight
primals_11 = self.block_0.conv3_1.con_layer.bias
primals_12 = self.block_0.conv3_2.con_layer.weight
primals_13 = self.block_0.conv3_2.con_layer.bias
primals_14 = self.block_0.conv3_3.con_layer.weight
primals_15 = self.block_0.conv3_3.con_layer.bias
primals_16 = self.block_0.conv3_4.con_layer.weight
primals_17 = self.block_0.conv3_4.con_layer.bias
primals_18 = self.block_0.conv4_1.con_layer.weight
primals_19 = self.block_0.conv4_1.con_layer.bias
primals_20 = self.block_0.conv4_2.con_layer.weight
primals_21 = self.block_0.conv4_2.con_layer.bias
primals_22 = self.block_0.conv4_3.con_layer.weight
primals_23 = self.block_0.conv4_3.con_layer.bias
primals_24 = self.block_0.conv4_4.con_layer.weight
primals_25 = self.block_0.conv4_4.con_layer.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25])
return output[0]
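# Usage sketch for the compiled wrapper (assumptions: a CUDA device, and the
# (4, 3, 64, 64) input shape enforced by assert_size_stride in call()):
def _example_forward():
    net = DebugnetworkNew(args=None).cuda()
    return net(torch.rand(4, 3, 64, 64, device='cuda'))  # -> (4, 128, 8, 8)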
| H-Liu1997/Pytorch_Pose_Estimation_Framework | Debugnetwork | false | 7,266 | [
"MIT"
] | 1 | 06616b3459ff639f8486e6ea4f93922597788b2a | https://github.com/H-Liu1997/Pytorch_Pose_Estimation_Framework/tree/06616b3459ff639f8486e6ea4f93922597788b2a | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
from torch.nn import init
class conv(nn.Module):
"""
    n x n convolution followed by an in-place ReLU.
"""
def __init__(self, in_dim, out_dim, kernal_size, stride, padding):
super().__init__()
self.con_layer = nn.Conv2d(in_dim, out_dim, kernal_size, stride,
padding)
self.relu = nn.ReLU(inplace=True)
self.initi()
def forward(self, input_):
output = self.con_layer(input_)
output = self.relu(output)
return output
def initi(self):
init.normal_(self.con_layer.weight, std=0.01)
if self.con_layer.bias is not None:
init.constant_(self.con_layer.bias, 0.0)
class VGG_19(nn.Module):
"""
    First 10 layers of VGG-19; layers 11 and 12 follow the CMU design.
"""
def __init__(self, input_dim):
super().__init__()
self.conv1_1 = conv(input_dim, 64, 3, 1, 1)
self.conv1_2 = conv(64, 64, 3, 1, 1)
self.pooling_1 = nn.MaxPool2d(2, 2, 0)
self.conv2_1 = conv(64, 128, 3, 1, 1)
self.conv2_2 = conv(128, 128, 3, 1, 1)
self.pooling_2 = nn.MaxPool2d(2, 2, 0)
self.conv3_1 = conv(128, 256, 3, 1, 1)
self.conv3_2 = conv(256, 256, 3, 1, 1)
self.conv3_3 = conv(256, 256, 3, 1, 1)
self.conv3_4 = conv(256, 256, 3, 1, 1)
self.pooling_3 = nn.MaxPool2d(2, 2, 0)
self.conv4_1 = conv(256, 512, 3, 1, 1)
self.conv4_2 = conv(512, 512, 3, 1, 1)
self.conv4_3 = conv(512, 256, 3, 1, 1)
self.conv4_4 = conv(256, 128, 3, 1, 1)
def forward(self, input_):
output = self.conv1_1(input_)
output = self.conv1_2(output)
output = self.pooling_1(output)
output = self.conv2_1(output)
output = self.conv2_2(output)
output = self.pooling_2(output)
output = self.conv3_1(output)
output = self.conv3_2(output)
output = self.conv3_3(output)
output = self.conv3_4(output)
output = self.pooling_3(output)
output = self.conv4_1(output)
output = self.conv4_2(output)
output = self.conv4_3(output)
output = self.conv4_4(output)
return output
class Model(nn.Module):
"""
"""
def __init__(self, args):
super().__init__()
self.block_0 = VGG_19(3)
def forward(self, input_):
output = self.block_0(input_)
return output
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
NeuralNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/nc/cncwsucylpsg2zmlivjfxu6vbd64ztxjndlsix2ysjtby3xohgk4.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# out_1 => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# out_3 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# out_3 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
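# The two kernels above are the standard numerically-stable softmax split over
# dim=1: first exp(x - max), then division by the per-position sum. Eager
# sketch (assumption: the x0/x2 stride arithmetic addresses a (4, 4, 4, 4)
# tensor reduced over dim 1):
def _softmax_two_pass(x):
    e = (x - x.amax(dim=1, keepdim=True)).exp()  # triton_poi_fused__softmax_1
    return e / e.sum(dim=1, keepdim=True)        # triton_poi_fused__softmax_2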
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf3
return (buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf4, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class NeuralNet(nn.Module):
def __init__(self, num_input_nodes, num_hidden_nodes, output_dimension):
super(NeuralNet, self).__init__()
self.input_linear = nn.Linear(num_input_nodes, num_hidden_nodes)
self.output_linear = nn.Linear(num_hidden_nodes, output_dimension)
def forward(self, input_vector):
out = self.input_linear(input_vector)
        out = torch.tanh(out)
        out = self.output_linear(out)
        out = F.softmax(out, dim=1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_input_nodes': 4, 'num_hidden_nodes': 4,
'output_dimension': 4}]
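# Sanity-check sketch for the module above: softmax over dim 1 (the axis the
# compiled kernels below reduce over) makes the output sum to one along that
# axis.
def _check_neuralnet():
    net = NeuralNet(4, 4, 4)
    y = net(torch.rand(4, 4, 4, 4))
    assert torch.allclose(y.sum(dim=1), torch.ones(4, 4, 4), atol=1e-5)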
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf3
return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf4, primals_4
class NeuralNetNew(nn.Module):
def __init__(self, num_input_nodes, num_hidden_nodes, output_dimension):
super(NeuralNetNew, self).__init__()
self.input_linear = nn.Linear(num_input_nodes, num_hidden_nodes)
self.output_linear = nn.Linear(num_hidden_nodes, output_dimension)
def forward(self, input_0):
primals_1 = self.input_linear.weight
primals_2 = self.input_linear.bias
primals_4 = self.output_linear.weight
primals_5 = self.output_linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| mohiitgupta/named-entity-recognition-nlp-purdue | NeuralNet | false | 7,267 | [
"MIT"
] | 1 | 68232bbd5d17f3e3989e5df37175cdc670896608 | https://github.com/mohiitgupta/named-entity-recognition-nlp-purdue/tree/68232bbd5d17f3e3989e5df37175cdc670896608 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, num_input_nodes, num_hidden_nodes, output_dimension):
super().__init__()
self.input_linear = nn.Linear(num_input_nodes, num_hidden_nodes)
self.output_linear = nn.Linear(num_hidden_nodes, output_dimension)
def forward(self, input_vector):
out = self.input_linear(input_vector)
        out = torch.tanh(out)
        out = self.output_linear(out)
        out = F.softmax(out, dim=1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_input_nodes': 4, 'num_hidden_nodes': 4,
'output_dimension': 4}]
|
LoRALayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/s4/cs4a3d5eq4vbxgviqbcvk4zafoqduplldcmyyynxgkd23bvnm7ty.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 0.5), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 16), (16, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [result], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mm]
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (16, 4), (1, 16), 0), out=buf1)
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf2, 256, grid=grid(256), stream=stream0)
return (buf2, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf0, primals_3, )
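# The compiled path decomposes LoRALayer.forward into two cuBLAS mm calls plus
# the scalar-mul kernel above; buf0 (the rank-16 bottleneck x @ A.T) is also
# returned so a backward pass can reuse it. Equivalent sketch:
def _lora_call_sketch(x2d, A, B):
    # x2d: (64, 4) view of the input; A: (16, 4); B: (4, 16), as asserted above.
    bottleneck = x2d @ A.T            # buf0
    return (bottleneck @ B.T) * 0.5   # buf1, then triton_poi_fused_mul_0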
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.parallel
import torch.utils.data
class LoRALayer(nn.Module):
def __init__(self, n_in, n_out=None, adapter_dim=16, adapter_alpha=32):
super(LoRALayer, self).__init__()
if not n_out:
n_out = n_in
self.adapter_dim = adapter_dim
self.adapter_alpha = adapter_alpha
self.adapter_proj_1 = nn.Linear(n_in, adapter_dim, bias=False)
nn.init.normal_(self.adapter_proj_1.weight, std=0.02)
self.adapter_proj_2 = nn.Linear(adapter_dim, n_out, bias=False)
self.adapter_proj_2.weight.data.zero_()
def forward(self, x):
scale_factor = self.adapter_dim / self.adapter_alpha
result = torch.matmul(x, self.adapter_proj_1.weight.type_as(x).T)
return torch.matmul(result, self.adapter_proj_2.weight.type_as(x).T
) * scale_factor
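# Note: the scale is adapter_dim / adapter_alpha = 16 / 32 = 0.5 (the constant
# baked into the compiled mul kernel), i.e. the reciprocal of the common LoRA
# alpha / r convention. Equivalent eager sketch:
def _lora_delta(x, layer):
    scale = layer.adapter_dim / layer.adapter_alpha
    return x @ layer.adapter_proj_1.weight.T @ layer.adapter_proj_2.weight.T * scale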
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 16), (16, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (16, 4), (1,
16), 0), out=buf1)
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](buf2, 256, XBLOCK=128, num_warps=
4, num_stages=1)
return buf2, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), buf0, primals_3
class LoRALayerNew(nn.Module):
def __init__(self, n_in, n_out=None, adapter_dim=16, adapter_alpha=32):
super(LoRALayerNew, self).__init__()
if not n_out:
n_out = n_in
self.adapter_dim = adapter_dim
self.adapter_alpha = adapter_alpha
self.adapter_proj_1 = nn.Linear(n_in, adapter_dim, bias=False)
nn.init.normal_(self.adapter_proj_1.weight, std=0.02)
self.adapter_proj_2 = nn.Linear(adapter_dim, n_out, bias=False)
self.adapter_proj_2.weight.data.zero_()
def forward(self, input_0):
primals_1 = self.adapter_proj_1.weight
primals_3 = self.adapter_proj_2.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| mojishoki/LoRA | LoRALayer | false | 7,268 | [
"MIT"
] | 1 | 556225e776b4e2c5f77d332db15f0c712c13fe0e | https://github.com/mojishoki/LoRA/tree/556225e776b4e2c5f77d332db15f0c712c13fe0e | import torch
from torch import nn
import torch.nn.parallel
import torch.utils.data
class Model(nn.Module):
def __init__(self, n_in, n_out=None, adapter_dim=16, adapter_alpha=32):
super().__init__()
if not n_out:
n_out = n_in
self.adapter_dim = adapter_dim
self.adapter_alpha = adapter_alpha
self.adapter_proj_1 = nn.Linear(n_in, adapter_dim, bias=False)
nn.init.normal_(self.adapter_proj_1.weight, std=0.02)
self.adapter_proj_2 = nn.Linear(adapter_dim, n_out, bias=False)
self.adapter_proj_2.weight.data.zero_()
def forward(self, x):
scale_factor = self.adapter_dim / self.adapter_alpha
result = torch.matmul(x, self.adapter_proj_1.weight.type_as(x).T)
return torch.matmul(result, self.adapter_proj_2.weight.type_as(x).T
) * scale_factor
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
NetVLAD | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/rf/crf4dxzzztpnpsbe5tqyxtwblvnanasglxpd4kotnpzrxio4tyxt.py
# Topologically Sorted Source Nodes: [soft_assign_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# soft_assign_1 => amax, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
triton_per_fused__softmax_0 = async_compile.triton('triton_per_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 64],
reduction_hint=ReductionHint.OUTER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*r2) + (1024*x1)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp10, xmask)
''', device_str='cuda')
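# This persistent-reduction kernel fuses the max and sum-of-exp passes of the
# soft-assignment softmax: each program reduces over the 64 cluster scores
# (r2) for one (spatial, batch) position, writing the per-position max and
# exp-sum. Eager sketch (assumption: scores laid out (B, K=64, H*W=16), as the
# x0 + 16*r2 + 1024*x1 addressing implies):
def _soft_assign_stats(scores):
    m = scores.amax(dim=1, keepdim=True)             # tmp4 -> out_ptr0
    s = (scores - m).exp().sum(dim=1, keepdim=True)  # tmp10 -> out_ptr1
    return m, s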
# kernel path: runs/run_shard_4/inductor_cache/5d/c5d7kzlutdnexlvvxxhykl25ql7o3i5jbl47ltsdy3da45644hz6.py
# Topologically Sorted Source Nodes: [residual, residual_1, sum_1, residual_2, residual_3, sum_2, residual_4, residual_5, sum_3, residual_6, residual_7, sum_4, residual_8, residual_9, sum_5, residual_10, residual_11, sum_6, residual_12, residual_13, sum_7, residual_14, residual_15, sum_8, residual_16, residual_17, sum_9, residual_18, residual_19, sum_10, residual_20, residual_21, sum_11, residual_22, residual_23, sum_12, residual_24, residual_25, sum_13, residual_26, residual_27, sum_14, residual_28, residual_29, sum_15, residual_30, residual_31, sum_16, residual_32, residual_33, sum_17, residual_34, residual_35, sum_18, residual_36, residual_37, sum_19, residual_38, residual_39, sum_20, residual_40, residual_41, sum_21, residual_42, residual_43, sum_22, residual_44, residual_45, sum_23, residual_46, residual_47, sum_24, residual_48, residual_49, sum_25, residual_50, residual_51, sum_26, residual_52, residual_53, sum_27, residual_54, residual_55, sum_28, residual_56, residual_57, sum_29], Original ATen: [aten.sub, aten.mul, aten.sum]
# Source node to ATen node mapping:
# residual => sub_1
# residual_1 => mul
# residual_10 => sub_6
# residual_11 => mul_5
# residual_12 => sub_7
# residual_13 => mul_6
# residual_14 => sub_8
# residual_15 => mul_7
# residual_16 => sub_9
# residual_17 => mul_8
# residual_18 => sub_10
# residual_19 => mul_9
# residual_2 => sub_2
# residual_20 => sub_11
# residual_21 => mul_10
# residual_22 => sub_12
# residual_23 => mul_11
# residual_24 => sub_13
# residual_25 => mul_12
# residual_26 => sub_14
# residual_27 => mul_13
# residual_28 => sub_15
# residual_29 => mul_14
# residual_3 => mul_1
# residual_30 => sub_16
# residual_31 => mul_15
# residual_32 => sub_17
# residual_33 => mul_16
# residual_34 => sub_18
# residual_35 => mul_17
# residual_36 => sub_19
# residual_37 => mul_18
# residual_38 => sub_20
# residual_39 => mul_19
# residual_4 => sub_3
# residual_40 => sub_21
# residual_41 => mul_20
# residual_42 => sub_22
# residual_43 => mul_21
# residual_44 => sub_23
# residual_45 => mul_22
# residual_46 => sub_24
# residual_47 => mul_23
# residual_48 => sub_25
# residual_49 => mul_24
# residual_5 => mul_2
# residual_50 => sub_26
# residual_51 => mul_25
# residual_52 => sub_27
# residual_53 => mul_26
# residual_54 => sub_28
# residual_55 => mul_27
# residual_56 => sub_29
# residual_57 => mul_28
# residual_6 => sub_4
# residual_7 => mul_3
# residual_8 => sub_5
# residual_9 => mul_4
# sum_1 => sum_2
# sum_10 => sum_11
# sum_11 => sum_12
# sum_12 => sum_13
# sum_13 => sum_14
# sum_14 => sum_15
# sum_15 => sum_16
# sum_16 => sum_17
# sum_17 => sum_18
# sum_18 => sum_19
# sum_19 => sum_20
# sum_2 => sum_3
# sum_20 => sum_21
# sum_21 => sum_22
# sum_22 => sum_23
# sum_23 => sum_24
# sum_24 => sum_25
# sum_25 => sum_26
# sum_26 => sum_27
# sum_27 => sum_28
# sum_28 => sum_29
# sum_29 => sum_30
# sum_3 => sum_4
# sum_4 => sum_5
# sum_5 => sum_6
# sum_6 => sum_7
# sum_7 => sum_8
# sum_8 => sum_9
# sum_9 => sum_10
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %unsqueeze_2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {})
# %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_4), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %unsqueeze_5), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [-1]), kwargs = {})
# %sub_3 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_7), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %unsqueeze_8), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [-1]), kwargs = {})
# %sub_4 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_10), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %unsqueeze_11), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_3, [-1]), kwargs = {})
# %sub_5 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_13), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, %unsqueeze_14), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [-1]), kwargs = {})
# %sub_6 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_16), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %unsqueeze_17), kwargs = {})
# %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_5, [-1]), kwargs = {})
# %sub_7 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_19), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_7, %unsqueeze_20), kwargs = {})
# %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_6, [-1]), kwargs = {})
# %sub_8 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_22), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_8, %unsqueeze_23), kwargs = {})
# %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_7, [-1]), kwargs = {})
# %sub_9 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_25), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, %unsqueeze_26), kwargs = {})
# %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_8, [-1]), kwargs = {})
# %sub_10 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_28), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %unsqueeze_29), kwargs = {})
# %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_9, [-1]), kwargs = {})
# %sub_11 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_31), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %unsqueeze_32), kwargs = {})
# %sum_12 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_10, [-1]), kwargs = {})
# %sub_12 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_34), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_12, %unsqueeze_35), kwargs = {})
# %sum_13 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_11, [-1]), kwargs = {})
# %sub_13 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_37), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_13, %unsqueeze_38), kwargs = {})
# %sum_14 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_12, [-1]), kwargs = {})
# %sub_14 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_40), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_14, %unsqueeze_41), kwargs = {})
# %sum_15 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_13, [-1]), kwargs = {})
# %sub_15 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_43), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_15, %unsqueeze_44), kwargs = {})
# %sum_16 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_14, [-1]), kwargs = {})
# %sub_16 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_46), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_16, %unsqueeze_47), kwargs = {})
# %sum_17 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_15, [-1]), kwargs = {})
# %sub_17 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_49), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, %unsqueeze_50), kwargs = {})
# %sum_18 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_16, [-1]), kwargs = {})
# %sub_18 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_52), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_18, %unsqueeze_53), kwargs = {})
# %sum_19 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_17, [-1]), kwargs = {})
# %sub_19 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_55), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_19, %unsqueeze_56), kwargs = {})
# %sum_20 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_18, [-1]), kwargs = {})
# %sub_20 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_58), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_20, %unsqueeze_59), kwargs = {})
# %sum_21 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_19, [-1]), kwargs = {})
# %sub_21 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_61), kwargs = {})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_21, %unsqueeze_62), kwargs = {})
# %sum_22 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_20, [-1]), kwargs = {})
# %sub_22 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_64), kwargs = {})
# %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_22, %unsqueeze_65), kwargs = {})
# %sum_23 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_21, [-1]), kwargs = {})
# %sub_23 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_67), kwargs = {})
# %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_23, %unsqueeze_68), kwargs = {})
# %sum_24 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_22, [-1]), kwargs = {})
# %sub_24 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_70), kwargs = {})
# %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_24, %unsqueeze_71), kwargs = {})
# %sum_25 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_23, [-1]), kwargs = {})
# %sub_25 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_73), kwargs = {})
# %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_25, %unsqueeze_74), kwargs = {})
# %sum_26 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_24, [-1]), kwargs = {})
# %sub_26 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_76), kwargs = {})
# %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_26, %unsqueeze_77), kwargs = {})
# %sum_27 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_25, [-1]), kwargs = {})
# %sub_27 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_79), kwargs = {})
# %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_27, %unsqueeze_80), kwargs = {})
# %sum_28 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_26, [-1]), kwargs = {})
# %sub_28 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_82), kwargs = {})
# %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_28, %unsqueeze_83), kwargs = {})
# %sum_29 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_27, [-1]), kwargs = {})
# %sub_29 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_85), kwargs = {})
# %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_29, %unsqueeze_86), kwargs = {})
# %sum_30 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_28, [-1]), kwargs = {})
triton_per_fused_mul_sub_sum_1 = async_compile.triton('triton_per_fused_mul_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: '*fp32', 33: '*fp32', 34: '*fp32', 35: '*fp32', 36: '*fp32', 37: '*fp32', 38: '*fp32', 39: '*fp32', 40: '*fp32', 41: '*fp32', 42: '*fp32', 43: '*fp32', 44: '*fp32', 45: '*fp32', 46: '*fp32', 47: '*fp32', 48: '*fp32', 49: '*fp32', 50: '*fp32', 51: '*fp32', 52: '*fp32', 53: '*fp32', 54: '*fp32', 55: '*fp32', 56: '*fp32', 57: '*fp32', 58: '*fp32', 59: '*fp32', 60: '*fp32', 61: '*fp32', 62: 'i32', 63: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 61, 'num_reduction': 29, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sub_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, out_ptr28, out_ptr29, out_ptr30, out_ptr31, out_ptr32, out_ptr33, out_ptr34, out_ptr35, out_ptr36, out_ptr37, out_ptr38, out_ptr39, out_ptr40, out_ptr41, out_ptr42, out_ptr43, out_ptr44, out_ptr45, out_ptr46, out_ptr47, out_ptr48, out_ptr49, out_ptr50, out_ptr51, out_ptr52, out_ptr53, out_ptr54, out_ptr55, out_ptr56, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
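    # Grid layout (inferred from the indexing below): x1 = batch (0..3),
    # x0 = feature dim (0..3), r2 = one of the 16 positions reduced over.
    # in_ptr0 holds the permuted features, in_ptr1 the flattened cluster
    # centers, in_ptr2 the raw assignment logits, and in_ptr3 / in_ptr4 the
    # per-position softmax max and sum of exponentials used for the stable
    # exp(logit - max) / sum_exp weights computed further down.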
tmp0 = tl.load(in_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (20 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (24 + x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (28 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (32 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (36 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (40 + x0), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (44 + x0), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr1 + (48 + x0), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (52 + x0), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (56 + x0), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (60 + x0), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (64 + x0), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr1 + (68 + x0), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr1 + (72 + x0), xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr1 + (76 + x0), xmask, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr1 + (80 + x0), xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr1 + (84 + x0), xmask, eviction_policy='evict_last')
tmp43 = tl.load(in_ptr1 + (88 + x0), xmask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr1 + (92 + x0), xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr1 + (96 + x0), xmask, eviction_policy='evict_last')
tmp49 = tl.load(in_ptr1 + (100 + x0), xmask, eviction_policy='evict_last')
tmp51 = tl.load(in_ptr1 + (104 + x0), xmask, eviction_policy='evict_last')
tmp53 = tl.load(in_ptr1 + (108 + x0), xmask, eviction_policy='evict_last')
tmp55 = tl.load(in_ptr1 + (112 + x0), xmask, eviction_policy='evict_last')
tmp57 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp59 = tl.load(in_ptr2 + (r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp60 = tl.load(in_ptr3 + (r2 + (16*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp63 = tl.load(in_ptr4 + (r2 + (16*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp70 = tl.load(in_ptr2 + (16 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp79 = tl.load(in_ptr2 + (32 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp88 = tl.load(in_ptr2 + (48 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp97 = tl.load(in_ptr2 + (64 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp106 = tl.load(in_ptr2 + (80 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp115 = tl.load(in_ptr2 + (96 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp124 = tl.load(in_ptr2 + (112 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp133 = tl.load(in_ptr2 + (128 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp142 = tl.load(in_ptr2 + (144 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp151 = tl.load(in_ptr2 + (160 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp160 = tl.load(in_ptr2 + (176 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp169 = tl.load(in_ptr2 + (192 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp178 = tl.load(in_ptr2 + (208 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp187 = tl.load(in_ptr2 + (224 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp196 = tl.load(in_ptr2 + (240 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp205 = tl.load(in_ptr2 + (256 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp214 = tl.load(in_ptr2 + (272 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp223 = tl.load(in_ptr2 + (288 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp232 = tl.load(in_ptr2 + (304 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp241 = tl.load(in_ptr2 + (320 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp250 = tl.load(in_ptr2 + (336 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp259 = tl.load(in_ptr2 + (352 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp268 = tl.load(in_ptr2 + (368 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp277 = tl.load(in_ptr2 + (384 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp286 = tl.load(in_ptr2 + (400 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp295 = tl.load(in_ptr2 + (416 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp304 = tl.load(in_ptr2 + (432 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp313 = tl.load(in_ptr2 + (448 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 - tmp1
tmp4 = tmp0 - tmp3
tmp6 = tmp0 - tmp5
tmp8 = tmp0 - tmp7
tmp10 = tmp0 - tmp9
tmp12 = tmp0 - tmp11
tmp14 = tmp0 - tmp13
tmp16 = tmp0 - tmp15
tmp18 = tmp0 - tmp17
tmp20 = tmp0 - tmp19
tmp22 = tmp0 - tmp21
tmp24 = tmp0 - tmp23
tmp26 = tmp0 - tmp25
tmp28 = tmp0 - tmp27
tmp30 = tmp0 - tmp29
tmp32 = tmp0 - tmp31
tmp34 = tmp0 - tmp33
tmp36 = tmp0 - tmp35
tmp38 = tmp0 - tmp37
tmp40 = tmp0 - tmp39
tmp42 = tmp0 - tmp41
tmp44 = tmp0 - tmp43
tmp46 = tmp0 - tmp45
tmp48 = tmp0 - tmp47
tmp50 = tmp0 - tmp49
tmp52 = tmp0 - tmp51
tmp54 = tmp0 - tmp53
tmp56 = tmp0 - tmp55
tmp58 = tmp0 - tmp57
tmp61 = tmp59 - tmp60
tmp62 = tl_math.exp(tmp61)
tmp64 = tmp62 / tmp63
tmp65 = tmp58 * tmp64
tmp66 = tl.broadcast_to(tmp65, [XBLOCK, RBLOCK])
tmp68 = tl.where(xmask, tmp66, 0)
tmp69 = tl.sum(tmp68, 1)[:, None]
tmp71 = tmp70 - tmp60
tmp72 = tl_math.exp(tmp71)
tmp73 = tmp72 / tmp63
tmp74 = tmp2 * tmp73
tmp75 = tl.broadcast_to(tmp74, [XBLOCK, RBLOCK])
tmp77 = tl.where(xmask, tmp75, 0)
tmp78 = tl.sum(tmp77, 1)[:, None]
tmp80 = tmp79 - tmp60
tmp81 = tl_math.exp(tmp80)
tmp82 = tmp81 / tmp63
tmp83 = tmp4 * tmp82
tmp84 = tl.broadcast_to(tmp83, [XBLOCK, RBLOCK])
tmp86 = tl.where(xmask, tmp84, 0)
tmp87 = tl.sum(tmp86, 1)[:, None]
tmp89 = tmp88 - tmp60
tmp90 = tl_math.exp(tmp89)
tmp91 = tmp90 / tmp63
tmp92 = tmp6 * tmp91
tmp93 = tl.broadcast_to(tmp92, [XBLOCK, RBLOCK])
tmp95 = tl.where(xmask, tmp93, 0)
tmp96 = tl.sum(tmp95, 1)[:, None]
tmp98 = tmp97 - tmp60
tmp99 = tl_math.exp(tmp98)
tmp100 = tmp99 / tmp63
tmp101 = tmp8 * tmp100
tmp102 = tl.broadcast_to(tmp101, [XBLOCK, RBLOCK])
tmp104 = tl.where(xmask, tmp102, 0)
tmp105 = tl.sum(tmp104, 1)[:, None]
tmp107 = tmp106 - tmp60
tmp108 = tl_math.exp(tmp107)
tmp109 = tmp108 / tmp63
tmp110 = tmp10 * tmp109
tmp111 = tl.broadcast_to(tmp110, [XBLOCK, RBLOCK])
tmp113 = tl.where(xmask, tmp111, 0)
tmp114 = tl.sum(tmp113, 1)[:, None]
tmp116 = tmp115 - tmp60
tmp117 = tl_math.exp(tmp116)
tmp118 = tmp117 / tmp63
tmp119 = tmp12 * tmp118
tmp120 = tl.broadcast_to(tmp119, [XBLOCK, RBLOCK])
tmp122 = tl.where(xmask, tmp120, 0)
tmp123 = tl.sum(tmp122, 1)[:, None]
tmp125 = tmp124 - tmp60
tmp126 = tl_math.exp(tmp125)
tmp127 = tmp126 / tmp63
tmp128 = tmp14 * tmp127
tmp129 = tl.broadcast_to(tmp128, [XBLOCK, RBLOCK])
tmp131 = tl.where(xmask, tmp129, 0)
tmp132 = tl.sum(tmp131, 1)[:, None]
tmp134 = tmp133 - tmp60
tmp135 = tl_math.exp(tmp134)
tmp136 = tmp135 / tmp63
tmp137 = tmp16 * tmp136
tmp138 = tl.broadcast_to(tmp137, [XBLOCK, RBLOCK])
tmp140 = tl.where(xmask, tmp138, 0)
tmp141 = tl.sum(tmp140, 1)[:, None]
tmp143 = tmp142 - tmp60
tmp144 = tl_math.exp(tmp143)
tmp145 = tmp144 / tmp63
tmp146 = tmp18 * tmp145
tmp147 = tl.broadcast_to(tmp146, [XBLOCK, RBLOCK])
tmp149 = tl.where(xmask, tmp147, 0)
tmp150 = tl.sum(tmp149, 1)[:, None]
tmp152 = tmp151 - tmp60
tmp153 = tl_math.exp(tmp152)
tmp154 = tmp153 / tmp63
tmp155 = tmp20 * tmp154
tmp156 = tl.broadcast_to(tmp155, [XBLOCK, RBLOCK])
tmp158 = tl.where(xmask, tmp156, 0)
tmp159 = tl.sum(tmp158, 1)[:, None]
tmp161 = tmp160 - tmp60
tmp162 = tl_math.exp(tmp161)
tmp163 = tmp162 / tmp63
tmp164 = tmp22 * tmp163
tmp165 = tl.broadcast_to(tmp164, [XBLOCK, RBLOCK])
tmp167 = tl.where(xmask, tmp165, 0)
tmp168 = tl.sum(tmp167, 1)[:, None]
tmp170 = tmp169 - tmp60
tmp171 = tl_math.exp(tmp170)
tmp172 = tmp171 / tmp63
tmp173 = tmp24 * tmp172
tmp174 = tl.broadcast_to(tmp173, [XBLOCK, RBLOCK])
tmp176 = tl.where(xmask, tmp174, 0)
tmp177 = tl.sum(tmp176, 1)[:, None]
tmp179 = tmp178 - tmp60
tmp180 = tl_math.exp(tmp179)
tmp181 = tmp180 / tmp63
tmp182 = tmp26 * tmp181
tmp183 = tl.broadcast_to(tmp182, [XBLOCK, RBLOCK])
tmp185 = tl.where(xmask, tmp183, 0)
tmp186 = tl.sum(tmp185, 1)[:, None]
tmp188 = tmp187 - tmp60
tmp189 = tl_math.exp(tmp188)
tmp190 = tmp189 / tmp63
tmp191 = tmp28 * tmp190
tmp192 = tl.broadcast_to(tmp191, [XBLOCK, RBLOCK])
tmp194 = tl.where(xmask, tmp192, 0)
tmp195 = tl.sum(tmp194, 1)[:, None]
tmp197 = tmp196 - tmp60
tmp198 = tl_math.exp(tmp197)
tmp199 = tmp198 / tmp63
tmp200 = tmp30 * tmp199
tmp201 = tl.broadcast_to(tmp200, [XBLOCK, RBLOCK])
tmp203 = tl.where(xmask, tmp201, 0)
tmp204 = tl.sum(tmp203, 1)[:, None]
tmp206 = tmp205 - tmp60
tmp207 = tl_math.exp(tmp206)
tmp208 = tmp207 / tmp63
tmp209 = tmp32 * tmp208
tmp210 = tl.broadcast_to(tmp209, [XBLOCK, RBLOCK])
tmp212 = tl.where(xmask, tmp210, 0)
tmp213 = tl.sum(tmp212, 1)[:, None]
tmp215 = tmp214 - tmp60
tmp216 = tl_math.exp(tmp215)
tmp217 = tmp216 / tmp63
tmp218 = tmp34 * tmp217
tmp219 = tl.broadcast_to(tmp218, [XBLOCK, RBLOCK])
tmp221 = tl.where(xmask, tmp219, 0)
tmp222 = tl.sum(tmp221, 1)[:, None]
tmp224 = tmp223 - tmp60
tmp225 = tl_math.exp(tmp224)
tmp226 = tmp225 / tmp63
tmp227 = tmp36 * tmp226
tmp228 = tl.broadcast_to(tmp227, [XBLOCK, RBLOCK])
tmp230 = tl.where(xmask, tmp228, 0)
tmp231 = tl.sum(tmp230, 1)[:, None]
tmp233 = tmp232 - tmp60
tmp234 = tl_math.exp(tmp233)
tmp235 = tmp234 / tmp63
tmp236 = tmp38 * tmp235
tmp237 = tl.broadcast_to(tmp236, [XBLOCK, RBLOCK])
tmp239 = tl.where(xmask, tmp237, 0)
tmp240 = tl.sum(tmp239, 1)[:, None]
tmp242 = tmp241 - tmp60
tmp243 = tl_math.exp(tmp242)
tmp244 = tmp243 / tmp63
tmp245 = tmp40 * tmp244
tmp246 = tl.broadcast_to(tmp245, [XBLOCK, RBLOCK])
tmp248 = tl.where(xmask, tmp246, 0)
tmp249 = tl.sum(tmp248, 1)[:, None]
tmp251 = tmp250 - tmp60
tmp252 = tl_math.exp(tmp251)
tmp253 = tmp252 / tmp63
tmp254 = tmp42 * tmp253
tmp255 = tl.broadcast_to(tmp254, [XBLOCK, RBLOCK])
tmp257 = tl.where(xmask, tmp255, 0)
tmp258 = tl.sum(tmp257, 1)[:, None]
tmp260 = tmp259 - tmp60
tmp261 = tl_math.exp(tmp260)
tmp262 = tmp261 / tmp63
tmp263 = tmp44 * tmp262
tmp264 = tl.broadcast_to(tmp263, [XBLOCK, RBLOCK])
tmp266 = tl.where(xmask, tmp264, 0)
tmp267 = tl.sum(tmp266, 1)[:, None]
tmp269 = tmp268 - tmp60
tmp270 = tl_math.exp(tmp269)
tmp271 = tmp270 / tmp63
tmp272 = tmp46 * tmp271
tmp273 = tl.broadcast_to(tmp272, [XBLOCK, RBLOCK])
tmp275 = tl.where(xmask, tmp273, 0)
tmp276 = tl.sum(tmp275, 1)[:, None]
tmp278 = tmp277 - tmp60
tmp279 = tl_math.exp(tmp278)
tmp280 = tmp279 / tmp63
tmp281 = tmp48 * tmp280
tmp282 = tl.broadcast_to(tmp281, [XBLOCK, RBLOCK])
tmp284 = tl.where(xmask, tmp282, 0)
tmp285 = tl.sum(tmp284, 1)[:, None]
tmp287 = tmp286 - tmp60
tmp288 = tl_math.exp(tmp287)
tmp289 = tmp288 / tmp63
tmp290 = tmp50 * tmp289
tmp291 = tl.broadcast_to(tmp290, [XBLOCK, RBLOCK])
tmp293 = tl.where(xmask, tmp291, 0)
tmp294 = tl.sum(tmp293, 1)[:, None]
tmp296 = tmp295 - tmp60
tmp297 = tl_math.exp(tmp296)
tmp298 = tmp297 / tmp63
tmp299 = tmp52 * tmp298
tmp300 = tl.broadcast_to(tmp299, [XBLOCK, RBLOCK])
tmp302 = tl.where(xmask, tmp300, 0)
tmp303 = tl.sum(tmp302, 1)[:, None]
tmp305 = tmp304 - tmp60
tmp306 = tl_math.exp(tmp305)
tmp307 = tmp306 / tmp63
tmp308 = tmp54 * tmp307
tmp309 = tl.broadcast_to(tmp308, [XBLOCK, RBLOCK])
tmp311 = tl.where(xmask, tmp309, 0)
tmp312 = tl.sum(tmp311, 1)[:, None]
tmp314 = tmp313 - tmp60
tmp315 = tl_math.exp(tmp314)
tmp316 = tmp315 / tmp63
tmp317 = tmp56 * tmp316
tmp318 = tl.broadcast_to(tmp317, [XBLOCK, RBLOCK])
tmp320 = tl.where(xmask, tmp318, 0)
tmp321 = tl.sum(tmp320, 1)[:, None]
tl.store(out_ptr0 + (r2 + (16*x3)), tmp2, xmask)
tl.store(out_ptr1 + (r2 + (16*x3)), tmp4, xmask)
tl.store(out_ptr2 + (r2 + (16*x3)), tmp6, xmask)
tl.store(out_ptr3 + (r2 + (16*x3)), tmp8, xmask)
tl.store(out_ptr4 + (r2 + (16*x3)), tmp10, xmask)
tl.store(out_ptr5 + (r2 + (16*x3)), tmp12, xmask)
tl.store(out_ptr6 + (r2 + (16*x3)), tmp14, xmask)
tl.store(out_ptr7 + (r2 + (16*x3)), tmp16, xmask)
tl.store(out_ptr8 + (r2 + (16*x3)), tmp18, xmask)
tl.store(out_ptr9 + (r2 + (16*x3)), tmp20, xmask)
tl.store(out_ptr10 + (r2 + (16*x3)), tmp22, xmask)
tl.store(out_ptr11 + (r2 + (16*x3)), tmp24, xmask)
tl.store(out_ptr12 + (r2 + (16*x3)), tmp26, xmask)
tl.store(out_ptr13 + (r2 + (16*x3)), tmp28, xmask)
tl.store(out_ptr14 + (r2 + (16*x3)), tmp30, xmask)
tl.store(out_ptr15 + (r2 + (16*x3)), tmp32, xmask)
tl.store(out_ptr16 + (r2 + (16*x3)), tmp34, xmask)
tl.store(out_ptr17 + (r2 + (16*x3)), tmp36, xmask)
tl.store(out_ptr18 + (r2 + (16*x3)), tmp38, xmask)
tl.store(out_ptr19 + (r2 + (16*x3)), tmp40, xmask)
tl.store(out_ptr20 + (r2 + (16*x3)), tmp42, xmask)
tl.store(out_ptr21 + (r2 + (16*x3)), tmp44, xmask)
tl.store(out_ptr22 + (r2 + (16*x3)), tmp46, xmask)
tl.store(out_ptr23 + (r2 + (16*x3)), tmp48, xmask)
tl.store(out_ptr24 + (r2 + (16*x3)), tmp50, xmask)
tl.store(out_ptr25 + (r2 + (16*x3)), tmp52, xmask)
tl.store(out_ptr26 + (r2 + (16*x3)), tmp54, xmask)
tl.store(out_ptr27 + (r2 + (16*x3)), tmp56, xmask)
tl.store(out_ptr28 + (x3), tmp69, xmask)
tl.store(out_ptr29 + (x3), tmp78, xmask)
tl.store(out_ptr30 + (x3), tmp87, xmask)
tl.store(out_ptr31 + (x3), tmp96, xmask)
tl.store(out_ptr32 + (x3), tmp105, xmask)
tl.store(out_ptr33 + (x3), tmp114, xmask)
tl.store(out_ptr34 + (x3), tmp123, xmask)
tl.store(out_ptr35 + (x3), tmp132, xmask)
tl.store(out_ptr36 + (x3), tmp141, xmask)
tl.store(out_ptr37 + (x3), tmp150, xmask)
tl.store(out_ptr38 + (x3), tmp159, xmask)
tl.store(out_ptr39 + (x3), tmp168, xmask)
tl.store(out_ptr40 + (x3), tmp177, xmask)
tl.store(out_ptr41 + (x3), tmp186, xmask)
tl.store(out_ptr42 + (x3), tmp195, xmask)
tl.store(out_ptr43 + (x3), tmp204, xmask)
tl.store(out_ptr44 + (x3), tmp213, xmask)
tl.store(out_ptr45 + (x3), tmp222, xmask)
tl.store(out_ptr46 + (x3), tmp231, xmask)
tl.store(out_ptr47 + (x3), tmp240, xmask)
tl.store(out_ptr48 + (x3), tmp249, xmask)
tl.store(out_ptr49 + (x3), tmp258, xmask)
tl.store(out_ptr50 + (x3), tmp267, xmask)
tl.store(out_ptr51 + (x3), tmp276, xmask)
tl.store(out_ptr52 + (x3), tmp285, xmask)
tl.store(out_ptr53 + (x3), tmp294, xmask)
tl.store(out_ptr54 + (x3), tmp303, xmask)
tl.store(out_ptr55 + (x3), tmp312, xmask)
tl.store(out_ptr56 + (x3), tmp321, xmask)
''', device_str='cuda')
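

# The persistent-reduction kernels in this file split one soft-assignment
# aggregation over 64 cluster centers into slices (29 + 28 + 7 reductions).
# A minimal eager-mode sketch of that computation, assuming hypothetical names
# (`features` is the permuted [B, D, N] input, `clusters` the [K, D] centers,
# `logits` the [B, K, N] assignment scores); the kernels additionally write the
# per-cluster residuals back out because the graph reuses them (num_users=2).
# Defined for reference only; it is never called by the generated module.
def _reference_mul_sub_sum(features, clusters, logits):
    # Numerically stable softmax over the cluster axis, matching the fused
    # exp(logit - max) / sum_exp sequence inside the kernels (in_ptr3 holds
    # the per-position max, in_ptr4 the per-position sum of exponentials).
    weights = (logits - logits.max(dim=1, keepdim=True).values).exp()
    weights = weights / weights.sum(dim=1, keepdim=True)
    # residual[b, k, d, n] = features[b, d, n] - clusters[k, d]
    residual = features.unsqueeze(1) - clusters.unsqueeze(0).unsqueeze(-1)
    # Weighted sum over the N positions yields a [B, K, D] descriptor.
    return (residual * weights.unsqueeze(2)).sum(dim=-1)
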
# kernel path: runs/run_shard_4/inductor_cache/pc/cpc4rilvtd5w4yagyevfs2h753octnb56qd5zek3pkk2r4jika3b.py
# Topologically Sorted Source Nodes: [residual_58, residual_59, sum_30, residual_60, residual_61, sum_31, residual_62, residual_63, sum_32, residual_64, residual_65, sum_33, residual_66, residual_67, sum_34, residual_68, residual_69, sum_35, residual_70, residual_71, sum_36, residual_72, residual_73, sum_37, residual_74, residual_75, sum_38, residual_76, residual_77, sum_39, residual_78, residual_79, sum_40, residual_80, residual_81, sum_41, residual_82, residual_83, sum_42, residual_84, residual_85, sum_43, residual_86, residual_87, sum_44, residual_88, residual_89, sum_45, residual_90, residual_91, sum_46, residual_92, residual_93, sum_47, residual_94, residual_95, sum_48, residual_96, residual_97, sum_49, residual_98, residual_99, sum_50, residual_100, residual_101, sum_51, residual_102, residual_103, sum_52, residual_104, residual_105, sum_53, residual_106, residual_107, sum_54, residual_108, residual_109, sum_55, residual_110, residual_111, sum_56, residual_112, residual_113, sum_57], Original ATen: [aten.sub, aten.mul, aten.sum]
# Source node to ATen node mapping:
# residual_100 => sub_51
# residual_101 => mul_50
# residual_102 => sub_52
# residual_103 => mul_51
# residual_104 => sub_53
# residual_105 => mul_52
# residual_106 => sub_54
# residual_107 => mul_53
# residual_108 => sub_55
# residual_109 => mul_54
# residual_110 => sub_56
# residual_111 => mul_55
# residual_112 => sub_57
# residual_113 => mul_56
# residual_58 => sub_30
# residual_59 => mul_29
# residual_60 => sub_31
# residual_61 => mul_30
# residual_62 => sub_32
# residual_63 => mul_31
# residual_64 => sub_33
# residual_65 => mul_32
# residual_66 => sub_34
# residual_67 => mul_33
# residual_68 => sub_35
# residual_69 => mul_34
# residual_70 => sub_36
# residual_71 => mul_35
# residual_72 => sub_37
# residual_73 => mul_36
# residual_74 => sub_38
# residual_75 => mul_37
# residual_76 => sub_39
# residual_77 => mul_38
# residual_78 => sub_40
# residual_79 => mul_39
# residual_80 => sub_41
# residual_81 => mul_40
# residual_82 => sub_42
# residual_83 => mul_41
# residual_84 => sub_43
# residual_85 => mul_42
# residual_86 => sub_44
# residual_87 => mul_43
# residual_88 => sub_45
# residual_89 => mul_44
# residual_90 => sub_46
# residual_91 => mul_45
# residual_92 => sub_47
# residual_93 => mul_46
# residual_94 => sub_48
# residual_95 => mul_47
# residual_96 => sub_49
# residual_97 => mul_48
# residual_98 => sub_50
# residual_99 => mul_49
# sum_30 => sum_31
# sum_31 => sum_32
# sum_32 => sum_33
# sum_33 => sum_34
# sum_34 => sum_35
# sum_35 => sum_36
# sum_36 => sum_37
# sum_37 => sum_38
# sum_38 => sum_39
# sum_39 => sum_40
# sum_40 => sum_41
# sum_41 => sum_42
# sum_42 => sum_43
# sum_43 => sum_44
# sum_44 => sum_45
# sum_45 => sum_46
# sum_46 => sum_47
# sum_47 => sum_48
# sum_48 => sum_49
# sum_49 => sum_50
# sum_50 => sum_51
# sum_51 => sum_52
# sum_52 => sum_53
# sum_53 => sum_54
# sum_54 => sum_55
# sum_55 => sum_56
# sum_56 => sum_57
# sum_57 => sum_58
# Graph fragment:
# %sub_30 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_88), kwargs = {})
# %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_30, %unsqueeze_89), kwargs = {})
# %sum_31 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_29, [-1]), kwargs = {})
# %sub_31 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_91), kwargs = {})
# %mul_30 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_31, %unsqueeze_92), kwargs = {})
# %sum_32 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_30, [-1]), kwargs = {})
# %sub_32 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_94), kwargs = {})
# %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_32, %unsqueeze_95), kwargs = {})
# %sum_33 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_31, [-1]), kwargs = {})
# %sub_33 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_97), kwargs = {})
# %mul_32 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_33, %unsqueeze_98), kwargs = {})
# %sum_34 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_32, [-1]), kwargs = {})
# %sub_34 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_100), kwargs = {})
# %mul_33 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_34, %unsqueeze_101), kwargs = {})
# %sum_35 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_33, [-1]), kwargs = {})
# %sub_35 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_103), kwargs = {})
# %mul_34 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_35, %unsqueeze_104), kwargs = {})
# %sum_36 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_34, [-1]), kwargs = {})
# %sub_36 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_106), kwargs = {})
# %mul_35 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_36, %unsqueeze_107), kwargs = {})
# %sum_37 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_35, [-1]), kwargs = {})
# %sub_37 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_109), kwargs = {})
# %mul_36 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_37, %unsqueeze_110), kwargs = {})
# %sum_38 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_36, [-1]), kwargs = {})
# %sub_38 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_112), kwargs = {})
# %mul_37 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_38, %unsqueeze_113), kwargs = {})
# %sum_39 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_37, [-1]), kwargs = {})
# %sub_39 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_115), kwargs = {})
# %mul_38 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_39, %unsqueeze_116), kwargs = {})
# %sum_40 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_38, [-1]), kwargs = {})
# %sub_40 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_118), kwargs = {})
# %mul_39 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_40, %unsqueeze_119), kwargs = {})
# %sum_41 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_39, [-1]), kwargs = {})
# %sub_41 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_121), kwargs = {})
# %mul_40 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_41, %unsqueeze_122), kwargs = {})
# %sum_42 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_40, [-1]), kwargs = {})
# %sub_42 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_124), kwargs = {})
# %mul_41 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_42, %unsqueeze_125), kwargs = {})
# %sum_43 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_41, [-1]), kwargs = {})
# %sub_43 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_127), kwargs = {})
# %mul_42 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_43, %unsqueeze_128), kwargs = {})
# %sum_44 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_42, [-1]), kwargs = {})
# %sub_44 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_130), kwargs = {})
# %mul_43 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_44, %unsqueeze_131), kwargs = {})
# %sum_45 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_43, [-1]), kwargs = {})
# %sub_45 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_133), kwargs = {})
# %mul_44 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_45, %unsqueeze_134), kwargs = {})
# %sum_46 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_44, [-1]), kwargs = {})
# %sub_46 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_136), kwargs = {})
# %mul_45 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_46, %unsqueeze_137), kwargs = {})
# %sum_47 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_45, [-1]), kwargs = {})
# %sub_47 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_139), kwargs = {})
# %mul_46 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_47, %unsqueeze_140), kwargs = {})
# %sum_48 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_46, [-1]), kwargs = {})
# %sub_48 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_142), kwargs = {})
# %mul_47 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_48, %unsqueeze_143), kwargs = {})
# %sum_49 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_47, [-1]), kwargs = {})
# %sub_49 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_145), kwargs = {})
# %mul_48 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_49, %unsqueeze_146), kwargs = {})
# %sum_50 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_48, [-1]), kwargs = {})
# %sub_50 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_148), kwargs = {})
# %mul_49 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_50, %unsqueeze_149), kwargs = {})
# %sum_51 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_49, [-1]), kwargs = {})
# %sub_51 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_151), kwargs = {})
# %mul_50 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_51, %unsqueeze_152), kwargs = {})
# %sum_52 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_50, [-1]), kwargs = {})
# %sub_52 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_154), kwargs = {})
# %mul_51 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_52, %unsqueeze_155), kwargs = {})
# %sum_53 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_51, [-1]), kwargs = {})
# %sub_53 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_157), kwargs = {})
# %mul_52 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_53, %unsqueeze_158), kwargs = {})
# %sum_54 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_52, [-1]), kwargs = {})
# %sub_54 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_160), kwargs = {})
# %mul_53 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_54, %unsqueeze_161), kwargs = {})
# %sum_55 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_53, [-1]), kwargs = {})
# %sub_55 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_163), kwargs = {})
# %mul_54 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_55, %unsqueeze_164), kwargs = {})
# %sum_56 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_54, [-1]), kwargs = {})
# %sub_56 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_166), kwargs = {})
# %mul_55 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_56, %unsqueeze_167), kwargs = {})
# %sum_57 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_55, [-1]), kwargs = {})
# %sub_57 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_169), kwargs = {})
# %mul_56 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_57, %unsqueeze_170), kwargs = {})
# %sum_58 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_56, [-1]), kwargs = {})
triton_per_fused_mul_sub_sum_2 = async_compile.triton('triton_per_fused_mul_sub_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: '*fp32', 33: '*fp32', 34: '*fp32', 35: '*fp32', 36: '*fp32', 37: '*fp32', 38: '*fp32', 39: '*fp32', 40: '*fp32', 41: '*fp32', 42: '*fp32', 43: '*fp32', 44: '*fp32', 45: '*fp32', 46: '*fp32', 47: '*fp32', 48: '*fp32', 49: '*fp32', 50: '*fp32', 51: '*fp32', 52: '*fp32', 53: '*fp32', 54: '*fp32', 55: '*fp32', 56: '*fp32', 57: '*fp32', 58: '*fp32', 59: '*fp32', 60: '*fp32', 61: 'i32', 62: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sub_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 59, 'num_reduction': 28, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sub_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, out_ptr28, out_ptr29, out_ptr30, out_ptr31, out_ptr32, out_ptr33, out_ptr34, out_ptr35, out_ptr36, out_ptr37, out_ptr38, out_ptr39, out_ptr40, out_ptr41, out_ptr42, out_ptr43, out_ptr44, out_ptr45, out_ptr46, out_ptr47, out_ptr48, out_ptr49, out_ptr50, out_ptr51, out_ptr52, out_ptr53, out_ptr54, out_ptr55, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (116 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (120 + x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (124 + x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (128 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (132 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (136 + x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (140 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (144 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (148 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (152 + x0), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (156 + x0), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr1 + (160 + x0), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (164 + x0), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (168 + x0), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (172 + x0), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (176 + x0), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr1 + (180 + x0), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr1 + (184 + x0), xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr1 + (188 + x0), xmask, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr1 + (192 + x0), xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr1 + (196 + x0), xmask, eviction_policy='evict_last')
tmp43 = tl.load(in_ptr1 + (200 + x0), xmask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr1 + (204 + x0), xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr1 + (208 + x0), xmask, eviction_policy='evict_last')
tmp49 = tl.load(in_ptr1 + (212 + x0), xmask, eviction_policy='evict_last')
tmp51 = tl.load(in_ptr1 + (216 + x0), xmask, eviction_policy='evict_last')
tmp53 = tl.load(in_ptr1 + (220 + x0), xmask, eviction_policy='evict_last')
tmp55 = tl.load(in_ptr1 + (224 + x0), xmask, eviction_policy='evict_last')
tmp57 = tl.load(in_ptr2 + (464 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp58 = tl.load(in_ptr3 + (r2 + (16*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp61 = tl.load(in_ptr4 + (r2 + (16*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp68 = tl.load(in_ptr2 + (480 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp77 = tl.load(in_ptr2 + (496 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp86 = tl.load(in_ptr2 + (512 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp95 = tl.load(in_ptr2 + (528 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp104 = tl.load(in_ptr2 + (544 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp113 = tl.load(in_ptr2 + (560 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp122 = tl.load(in_ptr2 + (576 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp131 = tl.load(in_ptr2 + (592 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp140 = tl.load(in_ptr2 + (608 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp149 = tl.load(in_ptr2 + (624 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp158 = tl.load(in_ptr2 + (640 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp167 = tl.load(in_ptr2 + (656 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp176 = tl.load(in_ptr2 + (672 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp185 = tl.load(in_ptr2 + (688 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp194 = tl.load(in_ptr2 + (704 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp203 = tl.load(in_ptr2 + (720 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp212 = tl.load(in_ptr2 + (736 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp221 = tl.load(in_ptr2 + (752 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp230 = tl.load(in_ptr2 + (768 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp239 = tl.load(in_ptr2 + (784 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp248 = tl.load(in_ptr2 + (800 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp257 = tl.load(in_ptr2 + (816 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp266 = tl.load(in_ptr2 + (832 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp275 = tl.load(in_ptr2 + (848 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp284 = tl.load(in_ptr2 + (864 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp293 = tl.load(in_ptr2 + (880 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp302 = tl.load(in_ptr2 + (896 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 - tmp1
tmp4 = tmp0 - tmp3
tmp6 = tmp0 - tmp5
tmp8 = tmp0 - tmp7
tmp10 = tmp0 - tmp9
tmp12 = tmp0 - tmp11
tmp14 = tmp0 - tmp13
tmp16 = tmp0 - tmp15
tmp18 = tmp0 - tmp17
tmp20 = tmp0 - tmp19
tmp22 = tmp0 - tmp21
tmp24 = tmp0 - tmp23
tmp26 = tmp0 - tmp25
tmp28 = tmp0 - tmp27
tmp30 = tmp0 - tmp29
tmp32 = tmp0 - tmp31
tmp34 = tmp0 - tmp33
tmp36 = tmp0 - tmp35
tmp38 = tmp0 - tmp37
tmp40 = tmp0 - tmp39
tmp42 = tmp0 - tmp41
tmp44 = tmp0 - tmp43
tmp46 = tmp0 - tmp45
tmp48 = tmp0 - tmp47
tmp50 = tmp0 - tmp49
tmp52 = tmp0 - tmp51
tmp54 = tmp0 - tmp53
tmp56 = tmp0 - tmp55
tmp59 = tmp57 - tmp58
tmp60 = tl_math.exp(tmp59)
tmp62 = tmp60 / tmp61
tmp63 = tmp2 * tmp62
tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK])
tmp66 = tl.where(xmask, tmp64, 0)
tmp67 = tl.sum(tmp66, 1)[:, None]
tmp69 = tmp68 - tmp58
tmp70 = tl_math.exp(tmp69)
tmp71 = tmp70 / tmp61
tmp72 = tmp4 * tmp71
tmp73 = tl.broadcast_to(tmp72, [XBLOCK, RBLOCK])
tmp75 = tl.where(xmask, tmp73, 0)
tmp76 = tl.sum(tmp75, 1)[:, None]
tmp78 = tmp77 - tmp58
tmp79 = tl_math.exp(tmp78)
tmp80 = tmp79 / tmp61
tmp81 = tmp6 * tmp80
tmp82 = tl.broadcast_to(tmp81, [XBLOCK, RBLOCK])
tmp84 = tl.where(xmask, tmp82, 0)
tmp85 = tl.sum(tmp84, 1)[:, None]
tmp87 = tmp86 - tmp58
tmp88 = tl_math.exp(tmp87)
tmp89 = tmp88 / tmp61
tmp90 = tmp8 * tmp89
tmp91 = tl.broadcast_to(tmp90, [XBLOCK, RBLOCK])
tmp93 = tl.where(xmask, tmp91, 0)
tmp94 = tl.sum(tmp93, 1)[:, None]
tmp96 = tmp95 - tmp58
tmp97 = tl_math.exp(tmp96)
tmp98 = tmp97 / tmp61
tmp99 = tmp10 * tmp98
tmp100 = tl.broadcast_to(tmp99, [XBLOCK, RBLOCK])
tmp102 = tl.where(xmask, tmp100, 0)
tmp103 = tl.sum(tmp102, 1)[:, None]
tmp105 = tmp104 - tmp58
tmp106 = tl_math.exp(tmp105)
tmp107 = tmp106 / tmp61
tmp108 = tmp12 * tmp107
tmp109 = tl.broadcast_to(tmp108, [XBLOCK, RBLOCK])
tmp111 = tl.where(xmask, tmp109, 0)
tmp112 = tl.sum(tmp111, 1)[:, None]
tmp114 = tmp113 - tmp58
tmp115 = tl_math.exp(tmp114)
tmp116 = tmp115 / tmp61
tmp117 = tmp14 * tmp116
tmp118 = tl.broadcast_to(tmp117, [XBLOCK, RBLOCK])
tmp120 = tl.where(xmask, tmp118, 0)
tmp121 = tl.sum(tmp120, 1)[:, None]
tmp123 = tmp122 - tmp58
tmp124 = tl_math.exp(tmp123)
tmp125 = tmp124 / tmp61
tmp126 = tmp16 * tmp125
tmp127 = tl.broadcast_to(tmp126, [XBLOCK, RBLOCK])
tmp129 = tl.where(xmask, tmp127, 0)
tmp130 = tl.sum(tmp129, 1)[:, None]
tmp132 = tmp131 - tmp58
tmp133 = tl_math.exp(tmp132)
tmp134 = tmp133 / tmp61
tmp135 = tmp18 * tmp134
tmp136 = tl.broadcast_to(tmp135, [XBLOCK, RBLOCK])
tmp138 = tl.where(xmask, tmp136, 0)
tmp139 = tl.sum(tmp138, 1)[:, None]
tmp141 = tmp140 - tmp58
tmp142 = tl_math.exp(tmp141)
tmp143 = tmp142 / tmp61
tmp144 = tmp20 * tmp143
tmp145 = tl.broadcast_to(tmp144, [XBLOCK, RBLOCK])
tmp147 = tl.where(xmask, tmp145, 0)
tmp148 = tl.sum(tmp147, 1)[:, None]
tmp150 = tmp149 - tmp58
tmp151 = tl_math.exp(tmp150)
tmp152 = tmp151 / tmp61
tmp153 = tmp22 * tmp152
tmp154 = tl.broadcast_to(tmp153, [XBLOCK, RBLOCK])
tmp156 = tl.where(xmask, tmp154, 0)
tmp157 = tl.sum(tmp156, 1)[:, None]
tmp159 = tmp158 - tmp58
tmp160 = tl_math.exp(tmp159)
tmp161 = tmp160 / tmp61
tmp162 = tmp24 * tmp161
tmp163 = tl.broadcast_to(tmp162, [XBLOCK, RBLOCK])
tmp165 = tl.where(xmask, tmp163, 0)
tmp166 = tl.sum(tmp165, 1)[:, None]
tmp168 = tmp167 - tmp58
tmp169 = tl_math.exp(tmp168)
tmp170 = tmp169 / tmp61
tmp171 = tmp26 * tmp170
tmp172 = tl.broadcast_to(tmp171, [XBLOCK, RBLOCK])
tmp174 = tl.where(xmask, tmp172, 0)
tmp175 = tl.sum(tmp174, 1)[:, None]
tmp177 = tmp176 - tmp58
tmp178 = tl_math.exp(tmp177)
tmp179 = tmp178 / tmp61
tmp180 = tmp28 * tmp179
tmp181 = tl.broadcast_to(tmp180, [XBLOCK, RBLOCK])
tmp183 = tl.where(xmask, tmp181, 0)
tmp184 = tl.sum(tmp183, 1)[:, None]
tmp186 = tmp185 - tmp58
tmp187 = tl_math.exp(tmp186)
tmp188 = tmp187 / tmp61
tmp189 = tmp30 * tmp188
tmp190 = tl.broadcast_to(tmp189, [XBLOCK, RBLOCK])
tmp192 = tl.where(xmask, tmp190, 0)
tmp193 = tl.sum(tmp192, 1)[:, None]
tmp195 = tmp194 - tmp58
tmp196 = tl_math.exp(tmp195)
tmp197 = tmp196 / tmp61
tmp198 = tmp32 * tmp197
tmp199 = tl.broadcast_to(tmp198, [XBLOCK, RBLOCK])
tmp201 = tl.where(xmask, tmp199, 0)
tmp202 = tl.sum(tmp201, 1)[:, None]
tmp204 = tmp203 - tmp58
tmp205 = tl_math.exp(tmp204)
tmp206 = tmp205 / tmp61
tmp207 = tmp34 * tmp206
tmp208 = tl.broadcast_to(tmp207, [XBLOCK, RBLOCK])
tmp210 = tl.where(xmask, tmp208, 0)
tmp211 = tl.sum(tmp210, 1)[:, None]
tmp213 = tmp212 - tmp58
tmp214 = tl_math.exp(tmp213)
tmp215 = tmp214 / tmp61
tmp216 = tmp36 * tmp215
tmp217 = tl.broadcast_to(tmp216, [XBLOCK, RBLOCK])
tmp219 = tl.where(xmask, tmp217, 0)
tmp220 = tl.sum(tmp219, 1)[:, None]
tmp222 = tmp221 - tmp58
tmp223 = tl_math.exp(tmp222)
tmp224 = tmp223 / tmp61
tmp225 = tmp38 * tmp224
tmp226 = tl.broadcast_to(tmp225, [XBLOCK, RBLOCK])
tmp228 = tl.where(xmask, tmp226, 0)
tmp229 = tl.sum(tmp228, 1)[:, None]
tmp231 = tmp230 - tmp58
tmp232 = tl_math.exp(tmp231)
tmp233 = tmp232 / tmp61
tmp234 = tmp40 * tmp233
tmp235 = tl.broadcast_to(tmp234, [XBLOCK, RBLOCK])
tmp237 = tl.where(xmask, tmp235, 0)
tmp238 = tl.sum(tmp237, 1)[:, None]
tmp240 = tmp239 - tmp58
tmp241 = tl_math.exp(tmp240)
tmp242 = tmp241 / tmp61
tmp243 = tmp42 * tmp242
tmp244 = tl.broadcast_to(tmp243, [XBLOCK, RBLOCK])
tmp246 = tl.where(xmask, tmp244, 0)
tmp247 = tl.sum(tmp246, 1)[:, None]
tmp249 = tmp248 - tmp58
tmp250 = tl_math.exp(tmp249)
tmp251 = tmp250 / tmp61
tmp252 = tmp44 * tmp251
tmp253 = tl.broadcast_to(tmp252, [XBLOCK, RBLOCK])
tmp255 = tl.where(xmask, tmp253, 0)
tmp256 = tl.sum(tmp255, 1)[:, None]
tmp258 = tmp257 - tmp58
tmp259 = tl_math.exp(tmp258)
tmp260 = tmp259 / tmp61
tmp261 = tmp46 * tmp260
tmp262 = tl.broadcast_to(tmp261, [XBLOCK, RBLOCK])
tmp264 = tl.where(xmask, tmp262, 0)
tmp265 = tl.sum(tmp264, 1)[:, None]
tmp267 = tmp266 - tmp58
tmp268 = tl_math.exp(tmp267)
tmp269 = tmp268 / tmp61
tmp270 = tmp48 * tmp269
tmp271 = tl.broadcast_to(tmp270, [XBLOCK, RBLOCK])
tmp273 = tl.where(xmask, tmp271, 0)
tmp274 = tl.sum(tmp273, 1)[:, None]
tmp276 = tmp275 - tmp58
tmp277 = tl_math.exp(tmp276)
tmp278 = tmp277 / tmp61
tmp279 = tmp50 * tmp278
tmp280 = tl.broadcast_to(tmp279, [XBLOCK, RBLOCK])
tmp282 = tl.where(xmask, tmp280, 0)
tmp283 = tl.sum(tmp282, 1)[:, None]
tmp285 = tmp284 - tmp58
tmp286 = tl_math.exp(tmp285)
tmp287 = tmp286 / tmp61
tmp288 = tmp52 * tmp287
tmp289 = tl.broadcast_to(tmp288, [XBLOCK, RBLOCK])
tmp291 = tl.where(xmask, tmp289, 0)
tmp292 = tl.sum(tmp291, 1)[:, None]
tmp294 = tmp293 - tmp58
tmp295 = tl_math.exp(tmp294)
tmp296 = tmp295 / tmp61
tmp297 = tmp54 * tmp296
tmp298 = tl.broadcast_to(tmp297, [XBLOCK, RBLOCK])
tmp300 = tl.where(xmask, tmp298, 0)
tmp301 = tl.sum(tmp300, 1)[:, None]
tmp303 = tmp302 - tmp58
tmp304 = tl_math.exp(tmp303)
tmp305 = tmp304 / tmp61
tmp306 = tmp56 * tmp305
tmp307 = tl.broadcast_to(tmp306, [XBLOCK, RBLOCK])
tmp309 = tl.where(xmask, tmp307, 0)
tmp310 = tl.sum(tmp309, 1)[:, None]
tl.store(out_ptr0 + (r2 + (16*x3)), tmp2, xmask)
tl.store(out_ptr1 + (r2 + (16*x3)), tmp4, xmask)
tl.store(out_ptr2 + (r2 + (16*x3)), tmp6, xmask)
tl.store(out_ptr3 + (r2 + (16*x3)), tmp8, xmask)
tl.store(out_ptr4 + (r2 + (16*x3)), tmp10, xmask)
tl.store(out_ptr5 + (r2 + (16*x3)), tmp12, xmask)
tl.store(out_ptr6 + (r2 + (16*x3)), tmp14, xmask)
tl.store(out_ptr7 + (r2 + (16*x3)), tmp16, xmask)
tl.store(out_ptr8 + (r2 + (16*x3)), tmp18, xmask)
tl.store(out_ptr9 + (r2 + (16*x3)), tmp20, xmask)
tl.store(out_ptr10 + (r2 + (16*x3)), tmp22, xmask)
tl.store(out_ptr11 + (r2 + (16*x3)), tmp24, xmask)
tl.store(out_ptr12 + (r2 + (16*x3)), tmp26, xmask)
tl.store(out_ptr13 + (r2 + (16*x3)), tmp28, xmask)
tl.store(out_ptr14 + (r2 + (16*x3)), tmp30, xmask)
tl.store(out_ptr15 + (r2 + (16*x3)), tmp32, xmask)
tl.store(out_ptr16 + (r2 + (16*x3)), tmp34, xmask)
tl.store(out_ptr17 + (r2 + (16*x3)), tmp36, xmask)
tl.store(out_ptr18 + (r2 + (16*x3)), tmp38, xmask)
tl.store(out_ptr19 + (r2 + (16*x3)), tmp40, xmask)
tl.store(out_ptr20 + (r2 + (16*x3)), tmp42, xmask)
tl.store(out_ptr21 + (r2 + (16*x3)), tmp44, xmask)
tl.store(out_ptr22 + (r2 + (16*x3)), tmp46, xmask)
tl.store(out_ptr23 + (r2 + (16*x3)), tmp48, xmask)
tl.store(out_ptr24 + (r2 + (16*x3)), tmp50, xmask)
tl.store(out_ptr25 + (r2 + (16*x3)), tmp52, xmask)
tl.store(out_ptr26 + (r2 + (16*x3)), tmp54, xmask)
tl.store(out_ptr27 + (r2 + (16*x3)), tmp56, xmask)
tl.store(out_ptr28 + (x3), tmp67, xmask)
tl.store(out_ptr29 + (x3), tmp76, xmask)
tl.store(out_ptr30 + (x3), tmp85, xmask)
tl.store(out_ptr31 + (x3), tmp94, xmask)
tl.store(out_ptr32 + (x3), tmp103, xmask)
tl.store(out_ptr33 + (x3), tmp112, xmask)
tl.store(out_ptr34 + (x3), tmp121, xmask)
tl.store(out_ptr35 + (x3), tmp130, xmask)
tl.store(out_ptr36 + (x3), tmp139, xmask)
tl.store(out_ptr37 + (x3), tmp148, xmask)
tl.store(out_ptr38 + (x3), tmp157, xmask)
tl.store(out_ptr39 + (x3), tmp166, xmask)
tl.store(out_ptr40 + (x3), tmp175, xmask)
tl.store(out_ptr41 + (x3), tmp184, xmask)
tl.store(out_ptr42 + (x3), tmp193, xmask)
tl.store(out_ptr43 + (x3), tmp202, xmask)
tl.store(out_ptr44 + (x3), tmp211, xmask)
tl.store(out_ptr45 + (x3), tmp220, xmask)
tl.store(out_ptr46 + (x3), tmp229, xmask)
tl.store(out_ptr47 + (x3), tmp238, xmask)
tl.store(out_ptr48 + (x3), tmp247, xmask)
tl.store(out_ptr49 + (x3), tmp256, xmask)
tl.store(out_ptr50 + (x3), tmp265, xmask)
tl.store(out_ptr51 + (x3), tmp274, xmask)
tl.store(out_ptr52 + (x3), tmp283, xmask)
tl.store(out_ptr53 + (x3), tmp292, xmask)
tl.store(out_ptr54 + (x3), tmp301, xmask)
tl.store(out_ptr55 + (x3), tmp310, xmask)
''', device_str='cuda')
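

# triton_per_fused_mul_sub_sum_2 repeats the pattern of the previous kernel
# for centers 29..56 (center offsets 116..224 in in_ptr1, logit offsets
# 464..896 in in_ptr2). A hypothetical shape-level illustration against the
# eager sketch above, using this file's fixed sizes (B=4, D=4, N=16, K=64);
# it is illustrative only and never called by the generated module:
def _check_reference_slice():
    import torch
    B, D, N, K = 4, 4, 16, 64
    features = torch.randn(B, D, N)
    clusters = torch.randn(K, D)
    logits = torch.randn(B, K, N)
    vlad = _reference_mul_sub_sum(features, clusters, logits)  # [B, K, D]
    # Slice covered by this kernel's 28 reductions.
    return vlad[:, 29:57]
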
# kernel path: runs/run_shard_4/inductor_cache/st/cstxuovzesn23qbkbmy6v42shqz4pls6o6mkdxfmwjszj5mvqofd.py
# Topologically Sorted Source Nodes: [residual_114, residual_115, sum_58, residual_116, residual_117, sum_59, residual_118, residual_119, sum_60, residual_120, residual_121, sum_61, residual_122, residual_123, sum_62, residual_124, residual_125, sum_63, residual_126, residual_127, sum_64], Original ATen: [aten.sub, aten.mul, aten.sum]
# Source node to ATen node mapping:
# residual_114 => sub_58
# residual_115 => mul_57
# residual_116 => sub_59
# residual_117 => mul_58
# residual_118 => sub_60
# residual_119 => mul_59
# residual_120 => sub_61
# residual_121 => mul_60
# residual_122 => sub_62
# residual_123 => mul_61
# residual_124 => sub_63
# residual_125 => mul_62
# residual_126 => sub_64
# residual_127 => mul_63
# sum_58 => sum_59
# sum_59 => sum_60
# sum_60 => sum_61
# sum_61 => sum_62
# sum_62 => sum_63
# sum_63 => sum_64
# sum_64 => sum_65
# Graph fragment:
# %sub_58 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_172), kwargs = {})
# %mul_57 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_58, %unsqueeze_173), kwargs = {})
# %sum_59 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_57, [-1]), kwargs = {})
# %sub_59 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_175), kwargs = {})
# %mul_58 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_59, %unsqueeze_176), kwargs = {})
# %sum_60 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_58, [-1]), kwargs = {})
# %sub_60 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_178), kwargs = {})
# %mul_59 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_60, %unsqueeze_179), kwargs = {})
# %sum_61 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_59, [-1]), kwargs = {})
# %sub_61 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_181), kwargs = {})
# %mul_60 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_61, %unsqueeze_182), kwargs = {})
# %sum_62 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_60, [-1]), kwargs = {})
# %sub_62 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_184), kwargs = {})
# %mul_61 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_62, %unsqueeze_185), kwargs = {})
# %sum_63 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_61, [-1]), kwargs = {})
# %sub_63 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_187), kwargs = {})
# %mul_62 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_63, %unsqueeze_188), kwargs = {})
# %sum_64 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_62, [-1]), kwargs = {})
# %sub_64 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_190), kwargs = {})
# %mul_63 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_64, %unsqueeze_191), kwargs = {})
# %sum_65 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_63, [-1]), kwargs = {})
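# Final slice: 7 reductions covering centers 57..63 (center offsets 228..252
# in in_ptr1, logit offsets 912..1008 in in_ptr2), completing the 64-center
# aggregation begun in the two kernels above.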
triton_per_fused_mul_sub_sum_3 = async_compile.triton('triton_per_fused_mul_sub_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: 'i32', 20: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sub_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 7, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
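    # Annotation (editorial, not compiler output): x3 enumerates (batch, dim)
    # pairs, with x1 = batch and x0 = descriptor dim; r2 ranges over the 16
    # spatial positions reduced below.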
tmp0 = tl.load(in_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (228 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (232 + x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (236 + x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (240 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (244 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (248 + x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (252 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (912 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr3 + (r2 + (16*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp19 = tl.load(in_ptr4 + (r2 + (16*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp26 = tl.load(in_ptr2 + (928 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp35 = tl.load(in_ptr2 + (944 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp44 = tl.load(in_ptr2 + (960 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp53 = tl.load(in_ptr2 + (976 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp62 = tl.load(in_ptr2 + (992 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp71 = tl.load(in_ptr2 + (1008 + r2 + (1024*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 - tmp1
tmp4 = tmp0 - tmp3
tmp6 = tmp0 - tmp5
tmp8 = tmp0 - tmp7
tmp10 = tmp0 - tmp9
tmp12 = tmp0 - tmp11
tmp14 = tmp0 - tmp13
tmp17 = tmp15 - tmp16
tmp18 = tl_math.exp(tmp17)
tmp20 = tmp18 / tmp19
tmp21 = tmp2 * tmp20
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.where(xmask, tmp22, 0)
tmp25 = tl.sum(tmp24, 1)[:, None]
tmp27 = tmp26 - tmp16
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp19
tmp30 = tmp4 * tmp29
tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK])
tmp33 = tl.where(xmask, tmp31, 0)
tmp34 = tl.sum(tmp33, 1)[:, None]
tmp36 = tmp35 - tmp16
tmp37 = tl_math.exp(tmp36)
tmp38 = tmp37 / tmp19
tmp39 = tmp6 * tmp38
tmp40 = tl.broadcast_to(tmp39, [XBLOCK, RBLOCK])
tmp42 = tl.where(xmask, tmp40, 0)
tmp43 = tl.sum(tmp42, 1)[:, None]
tmp45 = tmp44 - tmp16
tmp46 = tl_math.exp(tmp45)
tmp47 = tmp46 / tmp19
tmp48 = tmp8 * tmp47
tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
tmp51 = tl.where(xmask, tmp49, 0)
tmp52 = tl.sum(tmp51, 1)[:, None]
tmp54 = tmp53 - tmp16
tmp55 = tl_math.exp(tmp54)
tmp56 = tmp55 / tmp19
tmp57 = tmp10 * tmp56
tmp58 = tl.broadcast_to(tmp57, [XBLOCK, RBLOCK])
tmp60 = tl.where(xmask, tmp58, 0)
tmp61 = tl.sum(tmp60, 1)[:, None]
tmp63 = tmp62 - tmp16
tmp64 = tl_math.exp(tmp63)
tmp65 = tmp64 / tmp19
tmp66 = tmp12 * tmp65
tmp67 = tl.broadcast_to(tmp66, [XBLOCK, RBLOCK])
tmp69 = tl.where(xmask, tmp67, 0)
tmp70 = tl.sum(tmp69, 1)[:, None]
tmp72 = tmp71 - tmp16
tmp73 = tl_math.exp(tmp72)
tmp74 = tmp73 / tmp19
tmp75 = tmp14 * tmp74
tmp76 = tl.broadcast_to(tmp75, [XBLOCK, RBLOCK])
tmp78 = tl.where(xmask, tmp76, 0)
tmp79 = tl.sum(tmp78, 1)[:, None]
tl.store(out_ptr0 + (r2 + (16*x3)), tmp2, xmask)
tl.store(out_ptr1 + (r2 + (16*x3)), tmp4, xmask)
tl.store(out_ptr2 + (r2 + (16*x3)), tmp6, xmask)
tl.store(out_ptr3 + (r2 + (16*x3)), tmp8, xmask)
tl.store(out_ptr4 + (r2 + (16*x3)), tmp10, xmask)
tl.store(out_ptr5 + (r2 + (16*x3)), tmp12, xmask)
tl.store(out_ptr6 + (r2 + (16*x3)), tmp14, xmask)
tl.store(out_ptr7 + (x3), tmp25, xmask)
tl.store(out_ptr8 + (x3), tmp34, xmask)
tl.store(out_ptr9 + (x3), tmp43, xmask)
tl.store(out_ptr10 + (x3), tmp52, xmask)
tl.store(out_ptr11 + (x3), tmp61, xmask)
tl.store(out_ptr12 + (x3), tmp70, xmask)
tl.store(out_ptr13 + (x3), tmp79, xmask)
''', device_str='cuda')
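# Annotation (editorial, not compiler output): the persistent reduction above fuses
# the weighted-residual sums for the last seven clusters (k = 57..63; the centroid
# offsets 228..252 are 4*57..4*63). With n the batch index (x1), d the descriptor
# dim (x0) and p one of the 16 spatial positions (r2), it computes
#     vlad[n, k, d] = sum_p (x[n, d, p] - c[k, d]) * exp(logit[n, k, p] - m[n, p]) / Z[n, p]
# where m and Z are the softmax max and sum-of-exp loaded from in_ptr3 / in_ptr4.
# The raw residuals go to out_ptr0..out_ptr6, the reduced sums to out_ptr7..out_ptr13.
# A hedged one-cluster PyTorch sketch of the same computation:
#     vlad_k = ((x.view(N, D, -1) - c[k].view(1, D, 1)) * a[:, k].unsqueeze(1)).sum(-1)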
# kernel path: runs/run_shard_4/inductor_cache/oc/coczrn6vl6mexwre2io3xtgstw7pjvmjymjanrr2pcnzp7gd7wea.py
# Topologically Sorted Source Nodes: [vlad, setitem, setitem_1, setitem_2, setitem_3, setitem_4, setitem_5, setitem_6, setitem_7, setitem_8, setitem_9, setitem_10, setitem_11, setitem_12, setitem_13, setitem_14, setitem_15, setitem_16, setitem_17, setitem_18, setitem_19, setitem_20, setitem_21, setitem_22, setitem_23, setitem_24, setitem_25, setitem_26, setitem_27, setitem_28, setitem_29, setitem_30, setitem_31, setitem_32, setitem_33, setitem_34, setitem_35, setitem_36, setitem_37, setitem_38, setitem_39, setitem_40, setitem_41, setitem_42, setitem_43, setitem_44, setitem_45, setitem_46, setitem_47, setitem_48, setitem_49, setitem_50, setitem_51, setitem_52, setitem_53, setitem_54, setitem_55, setitem_56, setitem_57, setitem_58, setitem_59, setitem_60, setitem_61, setitem_62, setitem_63], Original ATen: [aten.zeros, aten.copy]
# Source node to ATen node mapping:
# setitem => copy
# setitem_1 => copy_1
# setitem_10 => copy_10
# setitem_11 => copy_11
# setitem_12 => copy_12
# setitem_13 => copy_13
# setitem_14 => copy_14
# setitem_15 => copy_15
# setitem_16 => copy_16
# setitem_17 => copy_17
# setitem_18 => copy_18
# setitem_19 => copy_19
# setitem_2 => copy_2
# setitem_20 => copy_20
# setitem_21 => copy_21
# setitem_22 => copy_22
# setitem_23 => copy_23
# setitem_24 => copy_24
# setitem_25 => copy_25
# setitem_26 => copy_26
# setitem_27 => copy_27
# setitem_28 => copy_28
# setitem_29 => copy_29
# setitem_3 => copy_3
# setitem_30 => copy_30
# setitem_31 => copy_31
# setitem_32 => copy_32
# setitem_33 => copy_33
# setitem_34 => copy_34
# setitem_35 => copy_35
# setitem_36 => copy_36
# setitem_37 => copy_37
# setitem_38 => copy_38
# setitem_39 => copy_39
# setitem_4 => copy_4
# setitem_40 => copy_40
# setitem_41 => copy_41
# setitem_42 => copy_42
# setitem_43 => copy_43
# setitem_44 => copy_44
# setitem_45 => copy_45
# setitem_46 => copy_46
# setitem_47 => copy_47
# setitem_48 => copy_48
# setitem_49 => copy_49
# setitem_5 => copy_5
# setitem_50 => copy_50
# setitem_51 => copy_51
# setitem_52 => copy_52
# setitem_53 => copy_53
# setitem_54 => copy_54
# setitem_55 => copy_55
# setitem_56 => copy_56
# setitem_57 => copy_57
# setitem_58 => copy_58
# setitem_59 => copy_59
# setitem_6 => copy_6
# setitem_60 => copy_60
# setitem_61 => copy_61
# setitem_62 => copy_62
# setitem_63 => copy_63
# setitem_7 => copy_7
# setitem_8 => copy_8
# setitem_9 => copy_9
# vlad => full
# Graph fragment:
# %full : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 64, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_7, %sum_2), kwargs = {})
# %slice_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%full, %copy, 1, 0, 1), kwargs = {})
# %copy_1 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_26, %sum_3), kwargs = {})
# %slice_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default, %copy_1, 1, 1, 2), kwargs = {})
# %copy_2 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_45, %sum_4), kwargs = {})
# %slice_scatter_default_2 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_1, %copy_2, 1, 2, 3), kwargs = {})
# %copy_3 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_64, %sum_5), kwargs = {})
# %slice_scatter_default_3 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_2, %copy_3, 1, 3, 4), kwargs = {})
# %copy_4 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_83, %sum_6), kwargs = {})
# %slice_scatter_default_4 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_3, %copy_4, 1, 4, 5), kwargs = {})
# %copy_5 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_102, %sum_7), kwargs = {})
# %slice_scatter_default_5 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_4, %copy_5, 1, 5, 6), kwargs = {})
# %copy_6 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_121, %sum_8), kwargs = {})
# %slice_scatter_default_6 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_5, %copy_6, 1, 6, 7), kwargs = {})
# %copy_7 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_140, %sum_9), kwargs = {})
# %slice_scatter_default_7 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_6, %copy_7, 1, 7, 8), kwargs = {})
# %copy_8 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_159, %sum_10), kwargs = {})
# %slice_scatter_default_8 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_7, %copy_8, 1, 8, 9), kwargs = {})
# %copy_9 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_178, %sum_11), kwargs = {})
# %slice_scatter_default_9 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_8, %copy_9, 1, 9, 10), kwargs = {})
# %copy_10 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_197, %sum_12), kwargs = {})
# %slice_scatter_default_10 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_9, %copy_10, 1, 10, 11), kwargs = {})
# %copy_11 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_216, %sum_13), kwargs = {})
# %slice_scatter_default_11 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_10, %copy_11, 1, 11, 12), kwargs = {})
# %copy_12 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_235, %sum_14), kwargs = {})
# %slice_scatter_default_12 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_11, %copy_12, 1, 12, 13), kwargs = {})
# %copy_13 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_254, %sum_15), kwargs = {})
# %slice_scatter_default_13 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_12, %copy_13, 1, 13, 14), kwargs = {})
# %copy_14 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_273, %sum_16), kwargs = {})
# %slice_scatter_default_14 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_13, %copy_14, 1, 14, 15), kwargs = {})
# %copy_15 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_292, %sum_17), kwargs = {})
# %slice_scatter_default_15 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_14, %copy_15, 1, 15, 16), kwargs = {})
# %copy_16 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_311, %sum_18), kwargs = {})
# %slice_scatter_default_16 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_15, %copy_16, 1, 16, 17), kwargs = {})
# %copy_17 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_330, %sum_19), kwargs = {})
# %slice_scatter_default_17 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_16, %copy_17, 1, 17, 18), kwargs = {})
# %copy_18 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_349, %sum_20), kwargs = {})
# %slice_scatter_default_18 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_17, %copy_18, 1, 18, 19), kwargs = {})
# %copy_19 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_368, %sum_21), kwargs = {})
# %slice_scatter_default_19 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_18, %copy_19, 1, 19, 20), kwargs = {})
# %copy_20 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_387, %sum_22), kwargs = {})
# %slice_scatter_default_20 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_19, %copy_20, 1, 20, 21), kwargs = {})
# %copy_21 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_406, %sum_23), kwargs = {})
# %slice_scatter_default_21 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_20, %copy_21, 1, 21, 22), kwargs = {})
# %copy_22 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_425, %sum_24), kwargs = {})
# %slice_scatter_default_22 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_21, %copy_22, 1, 22, 23), kwargs = {})
# %copy_23 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_444, %sum_25), kwargs = {})
# %slice_scatter_default_23 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_22, %copy_23, 1, 23, 24), kwargs = {})
# %copy_24 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_463, %sum_26), kwargs = {})
# %slice_scatter_default_24 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_23, %copy_24, 1, 24, 25), kwargs = {})
# %copy_25 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_482, %sum_27), kwargs = {})
# %slice_scatter_default_25 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_24, %copy_25, 1, 25, 26), kwargs = {})
# %copy_26 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_501, %sum_28), kwargs = {})
# %slice_scatter_default_26 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_25, %copy_26, 1, 26, 27), kwargs = {})
# %copy_27 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_520, %sum_29), kwargs = {})
# %slice_scatter_default_27 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_26, %copy_27, 1, 27, 28), kwargs = {})
# %copy_28 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_539, %sum_30), kwargs = {})
# %slice_scatter_default_28 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_27, %copy_28, 1, 28, 29), kwargs = {})
# %copy_29 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_558, %sum_31), kwargs = {})
# %slice_scatter_default_29 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_28, %copy_29, 1, 29, 30), kwargs = {})
# %copy_30 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_577, %sum_32), kwargs = {})
# %slice_scatter_default_30 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_29, %copy_30, 1, 30, 31), kwargs = {})
# %copy_31 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_596, %sum_33), kwargs = {})
# %slice_scatter_default_31 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_30, %copy_31, 1, 31, 32), kwargs = {})
# %copy_32 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_615, %sum_34), kwargs = {})
# %slice_scatter_default_32 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_31, %copy_32, 1, 32, 33), kwargs = {})
# %copy_33 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_634, %sum_35), kwargs = {})
# %slice_scatter_default_33 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_32, %copy_33, 1, 33, 34), kwargs = {})
# %copy_34 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_653, %sum_36), kwargs = {})
# %slice_scatter_default_34 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_33, %copy_34, 1, 34, 35), kwargs = {})
# %copy_35 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_672, %sum_37), kwargs = {})
# %slice_scatter_default_35 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_34, %copy_35, 1, 35, 36), kwargs = {})
# %copy_36 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_691, %sum_38), kwargs = {})
# %slice_scatter_default_36 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_35, %copy_36, 1, 36, 37), kwargs = {})
# %copy_37 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_710, %sum_39), kwargs = {})
# %slice_scatter_default_37 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_36, %copy_37, 1, 37, 38), kwargs = {})
# %copy_38 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_729, %sum_40), kwargs = {})
# %slice_scatter_default_38 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_37, %copy_38, 1, 38, 39), kwargs = {})
# %copy_39 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_748, %sum_41), kwargs = {})
# %slice_scatter_default_39 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_38, %copy_39, 1, 39, 40), kwargs = {})
# %copy_40 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_767, %sum_42), kwargs = {})
# %slice_scatter_default_40 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_39, %copy_40, 1, 40, 41), kwargs = {})
# %copy_41 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_786, %sum_43), kwargs = {})
# %slice_scatter_default_41 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_40, %copy_41, 1, 41, 42), kwargs = {})
# %copy_42 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_805, %sum_44), kwargs = {})
# %slice_scatter_default_42 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_41, %copy_42, 1, 42, 43), kwargs = {})
# %copy_43 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_824, %sum_45), kwargs = {})
# %slice_scatter_default_43 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_42, %copy_43, 1, 43, 44), kwargs = {})
# %copy_44 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_843, %sum_46), kwargs = {})
# %slice_scatter_default_44 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_43, %copy_44, 1, 44, 45), kwargs = {})
# %copy_45 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_862, %sum_47), kwargs = {})
# %slice_scatter_default_45 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_44, %copy_45, 1, 45, 46), kwargs = {})
# %copy_46 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_881, %sum_48), kwargs = {})
# %slice_scatter_default_46 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_45, %copy_46, 1, 46, 47), kwargs = {})
# %copy_47 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_900, %sum_49), kwargs = {})
# %slice_scatter_default_47 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_46, %copy_47, 1, 47, 48), kwargs = {})
# %copy_48 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_919, %sum_50), kwargs = {})
# %slice_scatter_default_48 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_47, %copy_48, 1, 48, 49), kwargs = {})
# %copy_49 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_938, %sum_51), kwargs = {})
# %slice_scatter_default_49 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_48, %copy_49, 1, 49, 50), kwargs = {})
# %copy_50 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_957, %sum_52), kwargs = {})
# %slice_scatter_default_50 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_49, %copy_50, 1, 50, 51), kwargs = {})
# %copy_51 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_976, %sum_53), kwargs = {})
# %slice_scatter_default_51 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_50, %copy_51, 1, 51, 52), kwargs = {})
# %copy_52 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_995, %sum_54), kwargs = {})
# %slice_scatter_default_52 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_51, %copy_52, 1, 52, 53), kwargs = {})
# %copy_53 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1014, %sum_55), kwargs = {})
# %slice_scatter_default_53 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_52, %copy_53, 1, 53, 54), kwargs = {})
# %copy_54 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1033, %sum_56), kwargs = {})
# %slice_scatter_default_54 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_53, %copy_54, 1, 54, 55), kwargs = {})
# %copy_55 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1052, %sum_57), kwargs = {})
# %slice_scatter_default_55 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_54, %copy_55, 1, 55, 56), kwargs = {})
# %copy_56 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1071, %sum_58), kwargs = {})
# %slice_scatter_default_56 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_55, %copy_56, 1, 56, 57), kwargs = {})
# %copy_57 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1090, %sum_59), kwargs = {})
# %slice_scatter_default_57 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_56, %copy_57, 1, 57, 58), kwargs = {})
# %copy_58 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1109, %sum_60), kwargs = {})
# %slice_scatter_default_58 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_57, %copy_58, 1, 58, 59), kwargs = {})
# %copy_59 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1128, %sum_61), kwargs = {})
# %slice_scatter_default_59 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_58, %copy_59, 1, 59, 60), kwargs = {})
# %copy_60 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1147, %sum_62), kwargs = {})
# %slice_scatter_default_60 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_59, %copy_60, 1, 60, 61), kwargs = {})
# %copy_61 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1166, %sum_63), kwargs = {})
# %slice_scatter_default_61 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_60, %copy_61, 1, 61, 62), kwargs = {})
# %copy_62 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1185, %sum_64), kwargs = {})
# %slice_scatter_default_62 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_61, %copy_62, 1, 62, 63), kwargs = {})
# %copy_63 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1204, %sum_65), kwargs = {})
# %slice_scatter_default_63 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_62, %copy_63, 1, 63, 64), kwargs = {})
triton_poi_fused_copy_zeros_4 = async_compile.triton('triton_poi_fused_copy_zeros_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: '*fp32', 33: '*fp32', 34: '*fp32', 35: '*fp32', 36: '*fp32', 37: '*fp32', 38: '*fp32', 39: '*fp32', 40: '*fp32', 41: '*fp32', 42: '*fp32', 43: '*fp32', 44: '*fp32', 45: '*fp32', 46: '*fp32', 47: '*fp32', 48: '*fp32', 49: '*fp32', 50: '*fp32', 51: '*fp32', 52: '*fp32', 53: '*fp32', 54: '*fp32', 55: '*fp32', 56: '*fp32', 57: '*fp32', 58: '*fp32', 59: '*fp32', 60: '*fp32', 61: '*fp32', 62: '*fp32', 63: '*fp32', 64: '*fp32', 65: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_copy_zeros_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 64, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_copy_zeros_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31, in_ptr32, in_ptr33, in_ptr34, in_ptr35, in_ptr36, in_ptr37, in_ptr38, in_ptr39, in_ptr40, in_ptr41, in_ptr42, in_ptr43, in_ptr44, in_ptr45, in_ptr46, in_ptr47, in_ptr48, in_ptr49, in_ptr50, in_ptr51, in_ptr52, in_ptr53, in_ptr54, in_ptr55, in_ptr56, in_ptr57, in_ptr58, in_ptr59, in_ptr60, in_ptr61, in_ptr62, in_ptr63, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 64
x0 = xindex % 4
x2 = (xindex // 256)
x3 = xindex
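    # Annotation (editorial): x2 = batch, x1 = cluster index (0..63), x0 =
    # descriptor dim; the nested tl.where cascade below routes each
    # (x2, x1, x0) slot to the sum buffer of its cluster.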
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 5, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (x0 + (4*x2)), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl.full([1], 3, tl.int64)
tmp8 = tmp0 >= tmp7
tmp9 = tmp0 < tmp1
tmp10 = tmp8 & tmp9
tmp11 = tl.load(in_ptr1 + (x0 + (4*x2)), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tl.full([1], 2, tl.int64)
tmp13 = tmp0 >= tmp12
tmp14 = tmp0 < tmp7
tmp15 = tmp13 & tmp14
tmp16 = tl.load(in_ptr2 + (x0 + (4*x2)), tmp15 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tl.full([1], 1, tl.int64)
tmp18 = tmp0 >= tmp17
tmp19 = tmp0 < tmp12
tmp20 = tmp18 & tmp19
tmp21 = tl.load(in_ptr3 + (x0 + (4*x2)), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tmp0 < tmp17
tmp23 = tl.load(in_ptr4 + (x0 + (4*x2)), tmp22 & xmask, eviction_policy='evict_last', other=0.0)
tmp24 = 0.0
tmp25 = tl.where(tmp22, tmp23, tmp24)
tmp26 = tl.where(tmp20, tmp21, tmp25)
tmp27 = tl.where(tmp15, tmp16, tmp26)
tmp28 = tl.where(tmp10, tmp11, tmp27)
tmp29 = tl.where(tmp5, tmp6, tmp28)
tmp30 = tl.full([1], 8, tl.int64)
tmp31 = tmp0 >= tmp30
tmp32 = tl.full([1], 9, tl.int64)
tmp33 = tmp0 < tmp32
tmp34 = tmp31 & tmp33
tmp35 = tl.load(in_ptr5 + (x0 + (4*x2)), tmp34 & xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tl.full([1], 7, tl.int64)
tmp37 = tmp0 >= tmp36
tmp38 = tmp0 < tmp30
tmp39 = tmp37 & tmp38
tmp40 = tl.load(in_ptr6 + (x0 + (4*x2)), tmp39 & xmask, eviction_policy='evict_last', other=0.0)
tmp41 = tl.full([1], 6, tl.int64)
tmp42 = tmp0 >= tmp41
tmp43 = tmp0 < tmp36
tmp44 = tmp42 & tmp43
tmp45 = tl.load(in_ptr7 + (x0 + (4*x2)), tmp44 & xmask, eviction_policy='evict_last', other=0.0)
tmp46 = tmp0 >= tmp3
tmp47 = tmp0 < tmp41
tmp48 = tmp46 & tmp47
tmp49 = tl.load(in_ptr8 + (x0 + (4*x2)), tmp48 & xmask, eviction_policy='evict_last', other=0.0)
tmp50 = tl.where(tmp48, tmp49, tmp29)
tmp51 = tl.where(tmp44, tmp45, tmp50)
tmp52 = tl.where(tmp39, tmp40, tmp51)
tmp53 = tl.where(tmp34, tmp35, tmp52)
tmp54 = tl.full([1], 12, tl.int64)
tmp55 = tmp0 >= tmp54
tmp56 = tl.full([1], 13, tl.int64)
tmp57 = tmp0 < tmp56
tmp58 = tmp55 & tmp57
tmp59 = tl.load(in_ptr9 + (x0 + (4*x2)), tmp58 & xmask, eviction_policy='evict_last', other=0.0)
tmp60 = tl.full([1], 11, tl.int64)
tmp61 = tmp0 >= tmp60
tmp62 = tmp0 < tmp54
tmp63 = tmp61 & tmp62
tmp64 = tl.load(in_ptr10 + (x0 + (4*x2)), tmp63 & xmask, eviction_policy='evict_last', other=0.0)
tmp65 = tl.full([1], 10, tl.int64)
tmp66 = tmp0 >= tmp65
tmp67 = tmp0 < tmp60
tmp68 = tmp66 & tmp67
tmp69 = tl.load(in_ptr11 + (x0 + (4*x2)), tmp68 & xmask, eviction_policy='evict_last', other=0.0)
tmp70 = tmp0 >= tmp32
tmp71 = tmp0 < tmp65
tmp72 = tmp70 & tmp71
tmp73 = tl.load(in_ptr12 + (x0 + (4*x2)), tmp72 & xmask, eviction_policy='evict_last', other=0.0)
tmp74 = tl.where(tmp72, tmp73, tmp53)
tmp75 = tl.where(tmp68, tmp69, tmp74)
tmp76 = tl.where(tmp63, tmp64, tmp75)
tmp77 = tl.where(tmp58, tmp59, tmp76)
tmp78 = tl.full([1], 16, tl.int64)
tmp79 = tmp0 >= tmp78
tmp80 = tl.full([1], 17, tl.int64)
tmp81 = tmp0 < tmp80
tmp82 = tmp79 & tmp81
tmp83 = tl.load(in_ptr13 + (x0 + (4*x2)), tmp82 & xmask, eviction_policy='evict_last', other=0.0)
tmp84 = tl.full([1], 15, tl.int64)
tmp85 = tmp0 >= tmp84
tmp86 = tmp0 < tmp78
tmp87 = tmp85 & tmp86
tmp88 = tl.load(in_ptr14 + (x0 + (4*x2)), tmp87 & xmask, eviction_policy='evict_last', other=0.0)
tmp89 = tl.full([1], 14, tl.int64)
tmp90 = tmp0 >= tmp89
tmp91 = tmp0 < tmp84
tmp92 = tmp90 & tmp91
tmp93 = tl.load(in_ptr15 + (x0 + (4*x2)), tmp92 & xmask, eviction_policy='evict_last', other=0.0)
tmp94 = tmp0 >= tmp56
tmp95 = tmp0 < tmp89
tmp96 = tmp94 & tmp95
tmp97 = tl.load(in_ptr16 + (x0 + (4*x2)), tmp96 & xmask, eviction_policy='evict_last', other=0.0)
tmp98 = tl.where(tmp96, tmp97, tmp77)
tmp99 = tl.where(tmp92, tmp93, tmp98)
tmp100 = tl.where(tmp87, tmp88, tmp99)
tmp101 = tl.where(tmp82, tmp83, tmp100)
tmp102 = tl.full([1], 20, tl.int64)
tmp103 = tmp0 >= tmp102
tmp104 = tl.full([1], 21, tl.int64)
tmp105 = tmp0 < tmp104
tmp106 = tmp103 & tmp105
tmp107 = tl.load(in_ptr17 + (x0 + (4*x2)), tmp106 & xmask, eviction_policy='evict_last', other=0.0)
tmp108 = tl.full([1], 19, tl.int64)
tmp109 = tmp0 >= tmp108
tmp110 = tmp0 < tmp102
tmp111 = tmp109 & tmp110
tmp112 = tl.load(in_ptr18 + (x0 + (4*x2)), tmp111 & xmask, eviction_policy='evict_last', other=0.0)
tmp113 = tl.full([1], 18, tl.int64)
tmp114 = tmp0 >= tmp113
tmp115 = tmp0 < tmp108
tmp116 = tmp114 & tmp115
tmp117 = tl.load(in_ptr19 + (x0 + (4*x2)), tmp116 & xmask, eviction_policy='evict_last', other=0.0)
tmp118 = tmp0 >= tmp80
tmp119 = tmp0 < tmp113
tmp120 = tmp118 & tmp119
tmp121 = tl.load(in_ptr20 + (x0 + (4*x2)), tmp120 & xmask, eviction_policy='evict_last', other=0.0)
tmp122 = tl.where(tmp120, tmp121, tmp101)
tmp123 = tl.where(tmp116, tmp117, tmp122)
tmp124 = tl.where(tmp111, tmp112, tmp123)
tmp125 = tl.where(tmp106, tmp107, tmp124)
tmp126 = tl.full([1], 24, tl.int64)
tmp127 = tmp0 >= tmp126
tmp128 = tl.full([1], 25, tl.int64)
tmp129 = tmp0 < tmp128
tmp130 = tmp127 & tmp129
tmp131 = tl.load(in_ptr21 + (x0 + (4*x2)), tmp130 & xmask, eviction_policy='evict_last', other=0.0)
tmp132 = tl.full([1], 23, tl.int64)
tmp133 = tmp0 >= tmp132
tmp134 = tmp0 < tmp126
tmp135 = tmp133 & tmp134
tmp136 = tl.load(in_ptr22 + (x0 + (4*x2)), tmp135 & xmask, eviction_policy='evict_last', other=0.0)
tmp137 = tl.full([1], 22, tl.int64)
tmp138 = tmp0 >= tmp137
tmp139 = tmp0 < tmp132
tmp140 = tmp138 & tmp139
tmp141 = tl.load(in_ptr23 + (x0 + (4*x2)), tmp140 & xmask, eviction_policy='evict_last', other=0.0)
tmp142 = tmp0 >= tmp104
tmp143 = tmp0 < tmp137
tmp144 = tmp142 & tmp143
tmp145 = tl.load(in_ptr24 + (x0 + (4*x2)), tmp144 & xmask, eviction_policy='evict_last', other=0.0)
tmp146 = tl.where(tmp144, tmp145, tmp125)
tmp147 = tl.where(tmp140, tmp141, tmp146)
tmp148 = tl.where(tmp135, tmp136, tmp147)
tmp149 = tl.where(tmp130, tmp131, tmp148)
tmp150 = tl.full([1], 28, tl.int64)
tmp151 = tmp0 >= tmp150
tmp152 = tl.full([1], 29, tl.int64)
tmp153 = tmp0 < tmp152
tmp154 = tmp151 & tmp153
tmp155 = tl.load(in_ptr25 + (x0 + (4*x2)), tmp154 & xmask, eviction_policy='evict_last', other=0.0)
tmp156 = tl.full([1], 27, tl.int64)
tmp157 = tmp0 >= tmp156
tmp158 = tmp0 < tmp150
tmp159 = tmp157 & tmp158
tmp160 = tl.load(in_ptr26 + (x0 + (4*x2)), tmp159 & xmask, eviction_policy='evict_last', other=0.0)
tmp161 = tl.full([1], 26, tl.int64)
tmp162 = tmp0 >= tmp161
tmp163 = tmp0 < tmp156
tmp164 = tmp162 & tmp163
tmp165 = tl.load(in_ptr27 + (x0 + (4*x2)), tmp164 & xmask, eviction_policy='evict_last', other=0.0)
tmp166 = tmp0 >= tmp128
tmp167 = tmp0 < tmp161
tmp168 = tmp166 & tmp167
tmp169 = tl.load(in_ptr28 + (x0 + (4*x2)), tmp168 & xmask, eviction_policy='evict_last', other=0.0)
tmp170 = tl.where(tmp168, tmp169, tmp149)
tmp171 = tl.where(tmp164, tmp165, tmp170)
tmp172 = tl.where(tmp159, tmp160, tmp171)
tmp173 = tl.where(tmp154, tmp155, tmp172)
tmp174 = tl.full([1], 32, tl.int64)
tmp175 = tmp0 >= tmp174
tmp176 = tl.full([1], 33, tl.int64)
tmp177 = tmp0 < tmp176
tmp178 = tmp175 & tmp177
tmp179 = tl.load(in_ptr29 + (x0 + (4*x2)), tmp178 & xmask, eviction_policy='evict_last', other=0.0)
tmp180 = tl.full([1], 31, tl.int64)
tmp181 = tmp0 >= tmp180
tmp182 = tmp0 < tmp174
tmp183 = tmp181 & tmp182
tmp184 = tl.load(in_ptr30 + (x0 + (4*x2)), tmp183 & xmask, eviction_policy='evict_last', other=0.0)
tmp185 = tl.full([1], 30, tl.int64)
tmp186 = tmp0 >= tmp185
tmp187 = tmp0 < tmp180
tmp188 = tmp186 & tmp187
tmp189 = tl.load(in_ptr31 + (x0 + (4*x2)), tmp188 & xmask, eviction_policy='evict_last', other=0.0)
tmp190 = tmp0 >= tmp152
tmp191 = tmp0 < tmp185
tmp192 = tmp190 & tmp191
tmp193 = tl.load(in_ptr32 + (x0 + (4*x2)), tmp192 & xmask, eviction_policy='evict_last', other=0.0)
tmp194 = tl.where(tmp192, tmp193, tmp173)
tmp195 = tl.where(tmp188, tmp189, tmp194)
tmp196 = tl.where(tmp183, tmp184, tmp195)
tmp197 = tl.where(tmp178, tmp179, tmp196)
tmp198 = tl.full([1], 36, tl.int64)
tmp199 = tmp0 >= tmp198
tmp200 = tl.full([1], 37, tl.int64)
tmp201 = tmp0 < tmp200
tmp202 = tmp199 & tmp201
tmp203 = tl.load(in_ptr33 + (x0 + (4*x2)), tmp202 & xmask, eviction_policy='evict_last', other=0.0)
tmp204 = tl.full([1], 35, tl.int64)
tmp205 = tmp0 >= tmp204
tmp206 = tmp0 < tmp198
tmp207 = tmp205 & tmp206
tmp208 = tl.load(in_ptr34 + (x0 + (4*x2)), tmp207 & xmask, eviction_policy='evict_last', other=0.0)
tmp209 = tl.full([1], 34, tl.int64)
tmp210 = tmp0 >= tmp209
tmp211 = tmp0 < tmp204
tmp212 = tmp210 & tmp211
tmp213 = tl.load(in_ptr35 + (x0 + (4*x2)), tmp212 & xmask, eviction_policy='evict_last', other=0.0)
tmp214 = tmp0 >= tmp176
tmp215 = tmp0 < tmp209
tmp216 = tmp214 & tmp215
tmp217 = tl.load(in_ptr36 + (x0 + (4*x2)), tmp216 & xmask, eviction_policy='evict_last', other=0.0)
tmp218 = tl.where(tmp216, tmp217, tmp197)
tmp219 = tl.where(tmp212, tmp213, tmp218)
tmp220 = tl.where(tmp207, tmp208, tmp219)
tmp221 = tl.where(tmp202, tmp203, tmp220)
tmp222 = tl.full([1], 40, tl.int64)
tmp223 = tmp0 >= tmp222
tmp224 = tl.full([1], 41, tl.int64)
tmp225 = tmp0 < tmp224
tmp226 = tmp223 & tmp225
tmp227 = tl.load(in_ptr37 + (x0 + (4*x2)), tmp226 & xmask, eviction_policy='evict_last', other=0.0)
tmp228 = tl.full([1], 39, tl.int64)
tmp229 = tmp0 >= tmp228
tmp230 = tmp0 < tmp222
tmp231 = tmp229 & tmp230
tmp232 = tl.load(in_ptr38 + (x0 + (4*x2)), tmp231 & xmask, eviction_policy='evict_last', other=0.0)
tmp233 = tl.full([1], 38, tl.int64)
tmp234 = tmp0 >= tmp233
tmp235 = tmp0 < tmp228
tmp236 = tmp234 & tmp235
tmp237 = tl.load(in_ptr39 + (x0 + (4*x2)), tmp236 & xmask, eviction_policy='evict_last', other=0.0)
tmp238 = tmp0 >= tmp200
tmp239 = tmp0 < tmp233
tmp240 = tmp238 & tmp239
tmp241 = tl.load(in_ptr40 + (x0 + (4*x2)), tmp240 & xmask, eviction_policy='evict_last', other=0.0)
tmp242 = tl.where(tmp240, tmp241, tmp221)
tmp243 = tl.where(tmp236, tmp237, tmp242)
tmp244 = tl.where(tmp231, tmp232, tmp243)
tmp245 = tl.where(tmp226, tmp227, tmp244)
tmp246 = tl.full([1], 44, tl.int64)
tmp247 = tmp0 >= tmp246
tmp248 = tl.full([1], 45, tl.int64)
tmp249 = tmp0 < tmp248
tmp250 = tmp247 & tmp249
tmp251 = tl.load(in_ptr41 + (x0 + (4*x2)), tmp250 & xmask, eviction_policy='evict_last', other=0.0)
tmp252 = tl.full([1], 43, tl.int64)
tmp253 = tmp0 >= tmp252
tmp254 = tmp0 < tmp246
tmp255 = tmp253 & tmp254
tmp256 = tl.load(in_ptr42 + (x0 + (4*x2)), tmp255 & xmask, eviction_policy='evict_last', other=0.0)
tmp257 = tl.full([1], 42, tl.int64)
tmp258 = tmp0 >= tmp257
tmp259 = tmp0 < tmp252
tmp260 = tmp258 & tmp259
tmp261 = tl.load(in_ptr43 + (x0 + (4*x2)), tmp260 & xmask, eviction_policy='evict_last', other=0.0)
tmp262 = tmp0 >= tmp224
tmp263 = tmp0 < tmp257
tmp264 = tmp262 & tmp263
tmp265 = tl.load(in_ptr44 + (x0 + (4*x2)), tmp264 & xmask, eviction_policy='evict_last', other=0.0)
tmp266 = tl.where(tmp264, tmp265, tmp245)
tmp267 = tl.where(tmp260, tmp261, tmp266)
tmp268 = tl.where(tmp255, tmp256, tmp267)
tmp269 = tl.where(tmp250, tmp251, tmp268)
tmp270 = tl.full([1], 48, tl.int64)
tmp271 = tmp0 >= tmp270
tmp272 = tl.full([1], 49, tl.int64)
tmp273 = tmp0 < tmp272
tmp274 = tmp271 & tmp273
tmp275 = tl.load(in_ptr45 + (x0 + (4*x2)), tmp274 & xmask, eviction_policy='evict_last', other=0.0)
tmp276 = tl.full([1], 47, tl.int64)
tmp277 = tmp0 >= tmp276
tmp278 = tmp0 < tmp270
tmp279 = tmp277 & tmp278
tmp280 = tl.load(in_ptr46 + (x0 + (4*x2)), tmp279 & xmask, eviction_policy='evict_last', other=0.0)
tmp281 = tl.full([1], 46, tl.int64)
tmp282 = tmp0 >= tmp281
tmp283 = tmp0 < tmp276
tmp284 = tmp282 & tmp283
tmp285 = tl.load(in_ptr47 + (x0 + (4*x2)), tmp284 & xmask, eviction_policy='evict_last', other=0.0)
tmp286 = tmp0 >= tmp248
tmp287 = tmp0 < tmp281
tmp288 = tmp286 & tmp287
tmp289 = tl.load(in_ptr48 + (x0 + (4*x2)), tmp288 & xmask, eviction_policy='evict_last', other=0.0)
tmp290 = tl.where(tmp288, tmp289, tmp269)
tmp291 = tl.where(tmp284, tmp285, tmp290)
tmp292 = tl.where(tmp279, tmp280, tmp291)
tmp293 = tl.where(tmp274, tmp275, tmp292)
tmp294 = tl.full([1], 52, tl.int64)
tmp295 = tmp0 >= tmp294
tmp296 = tl.full([1], 53, tl.int64)
tmp297 = tmp0 < tmp296
tmp298 = tmp295 & tmp297
tmp299 = tl.load(in_ptr49 + (x0 + (4*x2)), tmp298 & xmask, eviction_policy='evict_last', other=0.0)
tmp300 = tl.full([1], 51, tl.int64)
tmp301 = tmp0 >= tmp300
tmp302 = tmp0 < tmp294
tmp303 = tmp301 & tmp302
tmp304 = tl.load(in_ptr50 + (x0 + (4*x2)), tmp303 & xmask, eviction_policy='evict_last', other=0.0)
tmp305 = tl.full([1], 50, tl.int64)
tmp306 = tmp0 >= tmp305
tmp307 = tmp0 < tmp300
tmp308 = tmp306 & tmp307
tmp309 = tl.load(in_ptr51 + (x0 + (4*x2)), tmp308 & xmask, eviction_policy='evict_last', other=0.0)
tmp310 = tmp0 >= tmp272
tmp311 = tmp0 < tmp305
tmp312 = tmp310 & tmp311
tmp313 = tl.load(in_ptr52 + (x0 + (4*x2)), tmp312 & xmask, eviction_policy='evict_last', other=0.0)
tmp314 = tl.where(tmp312, tmp313, tmp293)
tmp315 = tl.where(tmp308, tmp309, tmp314)
tmp316 = tl.where(tmp303, tmp304, tmp315)
tmp317 = tl.where(tmp298, tmp299, tmp316)
tmp318 = tl.full([1], 56, tl.int64)
tmp319 = tmp0 >= tmp318
tmp320 = tl.full([1], 57, tl.int64)
tmp321 = tmp0 < tmp320
tmp322 = tmp319 & tmp321
tmp323 = tl.load(in_ptr53 + (x0 + (4*x2)), tmp322 & xmask, eviction_policy='evict_last', other=0.0)
tmp324 = tl.full([1], 55, tl.int64)
tmp325 = tmp0 >= tmp324
tmp326 = tmp0 < tmp318
tmp327 = tmp325 & tmp326
tmp328 = tl.load(in_ptr54 + (x0 + (4*x2)), tmp327 & xmask, eviction_policy='evict_last', other=0.0)
tmp329 = tl.full([1], 54, tl.int64)
tmp330 = tmp0 >= tmp329
tmp331 = tmp0 < tmp324
tmp332 = tmp330 & tmp331
tmp333 = tl.load(in_ptr55 + (x0 + (4*x2)), tmp332 & xmask, eviction_policy='evict_last', other=0.0)
tmp334 = tmp0 >= tmp296
tmp335 = tmp0 < tmp329
tmp336 = tmp334 & tmp335
tmp337 = tl.load(in_ptr56 + (x0 + (4*x2)), tmp336 & xmask, eviction_policy='evict_last', other=0.0)
tmp338 = tl.where(tmp336, tmp337, tmp317)
tmp339 = tl.where(tmp332, tmp333, tmp338)
tmp340 = tl.where(tmp327, tmp328, tmp339)
tmp341 = tl.where(tmp322, tmp323, tmp340)
tmp342 = tl.full([1], 60, tl.int64)
tmp343 = tmp0 >= tmp342
tmp344 = tl.full([1], 61, tl.int64)
tmp345 = tmp0 < tmp344
tmp346 = tmp343 & tmp345
tmp347 = tl.load(in_ptr57 + (x0 + (4*x2)), tmp346 & xmask, eviction_policy='evict_last', other=0.0)
tmp348 = tl.full([1], 59, tl.int64)
tmp349 = tmp0 >= tmp348
tmp350 = tmp0 < tmp342
tmp351 = tmp349 & tmp350
tmp352 = tl.load(in_ptr58 + (x0 + (4*x2)), tmp351 & xmask, eviction_policy='evict_last', other=0.0)
tmp353 = tl.full([1], 58, tl.int64)
tmp354 = tmp0 >= tmp353
tmp355 = tmp0 < tmp348
tmp356 = tmp354 & tmp355
tmp357 = tl.load(in_ptr59 + (x0 + (4*x2)), tmp356 & xmask, eviction_policy='evict_last', other=0.0)
tmp358 = tmp0 >= tmp320
tmp359 = tmp0 < tmp353
tmp360 = tmp358 & tmp359
tmp361 = tl.load(in_ptr60 + (x0 + (4*x2)), tmp360 & xmask, eviction_policy='evict_last', other=0.0)
tmp362 = tl.where(tmp360, tmp361, tmp341)
tmp363 = tl.where(tmp356, tmp357, tmp362)
tmp364 = tl.where(tmp351, tmp352, tmp363)
tmp365 = tl.where(tmp346, tmp347, tmp364)
tmp366 = tl.full([1], 63, tl.int64)
tmp367 = tmp0 >= tmp366
tmp368 = tl.load(in_ptr61 + (x0 + (4*x2)), tmp367 & xmask, eviction_policy='evict_last', other=0.0)
tmp369 = tl.full([1], 62, tl.int64)
tmp370 = tmp0 >= tmp369
tmp371 = tmp0 < tmp366
tmp372 = tmp370 & tmp371
tmp373 = tl.load(in_ptr62 + (x0 + (4*x2)), tmp372 & xmask, eviction_policy='evict_last', other=0.0)
tmp374 = tmp0 >= tmp344
tmp375 = tmp0 < tmp369
tmp376 = tmp374 & tmp375
tmp377 = tl.load(in_ptr63 + (x0 + (4*x2)), tmp376 & xmask, eviction_policy='evict_last', other=0.0)
tmp378 = tl.where(tmp376, tmp377, tmp365)
tmp379 = tl.where(tmp372, tmp373, tmp378)
tmp380 = tl.where(tmp367, tmp368, tmp379)
tl.store(in_out_ptr0 + (x3), tmp380, xmask)
''', device_str='cuda')
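# Annotation (editorial, not compiler output): the pointwise kernel above flattens
# the 64-step slice_scatter chain from the graph fragment into a single write per
# output element. It decodes the cluster index x1 and walks the tl.where cascade to
# select the matching per-cluster sum buffer (in_ptr0..in_ptr63); the 0.0 default
# is the initial zeros tensor, which every cluster slot ends up overwriting.
# A hedged PyTorch sketch of the same materialization:
#     vlad = torch.stack(per_cluster_sums, dim=1)  # (4, 64, 4)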
# kernel path: runs/run_shard_4/inductor_cache/wo/cwoqglscqofsrwvjna2wblwxhomdjbzeemiddjzs3znbybhuucvg.py
# Topologically Sorted Source Nodes: [vlad_1, vlad_3], Original ATen: [aten.div, aten.linalg_vector_norm]
# Source node to ATen node mapping:
# vlad_1 => div_1
# vlad_3 => div_2, pow_3, pow_4, sum_67
# Graph fragment:
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%slice_scatter_default_63, %expand_64), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_2, 2), kwargs = {})
# %sum_67 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1], True), kwargs = {})
# %pow_4 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_67, 0.5), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, %expand_65), kwargs = {})
triton_red_fused_div_linalg_vector_norm_5 = async_compile.triton('triton_red_fused_div_linalg_vector_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[4, 256],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_div_linalg_vector_norm_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_div_linalg_vector_norm_5(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 4
rnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
_tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
r2 = (rindex // 4)
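        # Annotation (editorial): r3 enumerates the 256 = 64x4 (cluster, dim)
        # entries of one batch row; r2 is the cluster index, so the four loads
        # below fetch that cluster's dims for its intra-normalization norm.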
tmp0 = tl.load(in_ptr0 + (r3 + (256*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.load(in_ptr0 + ((4*r2) + (256*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp3 = tl.load(in_ptr0 + (1 + (4*r2) + (256*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (2 + (4*r2) + (256*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tl.load(in_ptr0 + (3 + (4*r2) + (256*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = _tmp18 + tmp17
_tmp18 = tl.where(rmask & xmask, tmp19, _tmp18)
tl.store(out_ptr0 + (r3 + (256*x0)), tmp15, rmask & xmask)
tmp18 = tl.sum(_tmp18, 1)[:, None]
tmp20 = libdevice.sqrt(tmp18)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp20, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp21 = tl.load(out_ptr0 + (r3 + (256*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp22 = 1e-12
tmp23 = triton_helpers.maximum(tmp20, tmp22)
tmp24 = tmp21 / tmp23
tl.store(out_ptr1 + (r3 + (256*x0)), tmp24, rmask & xmask)
''', device_str='cuda')
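# Annotation (editorial, not compiler output): the reduction above performs the
# two-stage L2 normalization of the (4, 64, 4) VLAD matrix with two passes over
# the 256 elements of each batch row:
#   pass 1: intra-normalization, v[n, k, :] /= max(||v[n, k, :]||_2, 1e-12),
#           accumulating the squared normalized values on the fly;
#   pass 2: global normalization of the flattened row by max(sqrt(acc), 1e-12).
# A hedged PyTorch sketch: F.normalize(v, dim=2).view(N, -1), then
# F.normalize(..., dim=1), both with the default eps of 1e-12.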
async_compile.wait(globals())
del async_compile
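# Reference sketch (editorial assumption, not part of the compiled graph): a plain
# PyTorch rendition of the NetVLAD-style aggregation that the fused kernels above
# appear to implement for N=4, D=4, K=64 inputs with 16 spatial positions.
# `_reference_netvlad` is a hypothetical helper added for readability only; the
# generated `call` below never invokes it.
def _reference_netvlad(x, conv_weight, centroids):
    import torch.nn.functional as F
    N, D = x.shape[0], x.shape[1]
    K = centroids.shape[0]
    logits = F.conv2d(x, conv_weight)                      # (N, K, H, W) soft-assignment scores
    soft_assign = F.softmax(logits.view(N, K, -1), dim=1)  # softmax over clusters per position
    x_flat = x.view(N, D, -1)                              # (N, D, P)
    vlad = x_flat.new_zeros((N, K, D))
    for k in range(K):                                     # one setitem per cluster, as in the graph
        residual = x_flat - centroids[k].view(1, D, 1)     # (N, D, P)
        vlad[:, k, :] = (residual * soft_assign[:, k].unsqueeze(1)).sum(-1)
    vlad = F.normalize(vlad, p=2, dim=2)                   # intra-normalization per cluster
    return F.normalize(vlad.view(N, -1), p=2, dim=1)       # global L2 normalization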
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (64, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (64, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 4, 4), (1024, 16, 4, 1))
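        # Annotation (editorial): buf0 holds the soft-assignment logits produced by
        # the 1x1 convolution, one score per (batch, cluster, spatial position).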
buf1 = empty_strided_cuda((4, 1, 16), (16, 16, 1), torch.float32)
buf2 = empty_strided_cuda((4, 1, 16), (16, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [soft_assign_1], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_per_fused__softmax_0.run(buf0, buf1, buf2, 64, 64, grid=grid(64), stream=stream0)
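        # Annotation (editorial): buf1 / buf2 hold, per (batch, spatial position),
        # the max and the sum of exponentials over the 64 cluster logits; the
        # residual kernels re-materialize exp(logit - max) / sum from these two
        # reductions instead of storing the full softmax.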
buf4 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf6 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf8 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf10 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf13 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf15 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf17 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf19 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf22 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf24 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf26 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf28 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf31 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf33 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf35 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf37 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf40 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf42 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf44 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf46 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf49 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf51 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf53 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf55 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf58 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf60 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf62 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf64 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf3 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf5 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf7 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf9 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf11 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf14 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf16 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf18 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf20 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf23 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf25 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf27 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf29 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf32 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf34 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf36 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf38 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf41 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf43 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf45 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf47 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf50 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf52 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf54 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf56 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf59 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf61 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf63 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf65 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [residual, residual_1, sum_1, residual_2, residual_3, sum_2, residual_4, residual_5, sum_3, residual_6, residual_7, sum_4, residual_8, residual_9, sum_5, residual_10, residual_11, sum_6, residual_12, residual_13, sum_7, residual_14, residual_15, sum_8, residual_16, residual_17, sum_9, residual_18, residual_19, sum_10, residual_20, residual_21, sum_11, residual_22, residual_23, sum_12, residual_24, residual_25, sum_13, residual_26, residual_27, sum_14, residual_28, residual_29, sum_15, residual_30, residual_31, sum_16, residual_32, residual_33, sum_17, residual_34, residual_35, sum_18, residual_36, residual_37, sum_19, residual_38, residual_39, sum_20, residual_40, residual_41, sum_21, residual_42, residual_43, sum_22, residual_44, residual_45, sum_23, residual_46, residual_47, sum_24, residual_48, residual_49, sum_25, residual_50, residual_51, sum_26, residual_52, residual_53, sum_27, residual_54, residual_55, sum_28, residual_56, residual_57, sum_29], Original ATen: [aten.sub, aten.mul, aten.sum]
triton_per_fused_mul_sub_sum_1.run(primals_1, primals_3, buf0, buf1, buf2, buf4, buf6, buf8, buf10, buf13, buf15, buf17, buf19, buf22, buf24, buf26, buf28, buf31, buf33, buf35, buf37, buf40, buf42, buf44, buf46, buf49, buf51, buf53, buf55, buf58, buf60, buf62, buf64, buf3, buf5, buf7, buf9, buf11, buf14, buf16, buf18, buf20, buf23, buf25, buf27, buf29, buf32, buf34, buf36, buf38, buf41, buf43, buf45, buf47, buf50, buf52, buf54, buf56, buf59, buf61, buf63, buf65, 16, 16, grid=grid(16), stream=stream0)
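        # Annotation (editorial): this single fused launch appears to cover
        # clusters 0..28 (source nodes residual..residual_57, sum_1..sum_29). The
        # (4, 1, 4, 16) buffers keep the raw residuals, which the graph seems to
        # consume again later (the analogous %sub_* nodes above carry num_users=2),
        # and the (4, 1, 4) buffers receive each cluster's reduced VLAD row.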
buf67 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf69 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf71 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf73 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf76 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf78 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf80 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf82 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf85 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf87 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf89 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf91 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf94 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf96 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf98 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf100 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf103 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf105 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf107 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf109 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf112 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf114 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf116 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf118 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf121 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf123 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf125 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf127 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf68 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf70 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf72 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf74 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf77 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf79 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf81 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf83 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf86 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf88 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf90 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf92 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf95 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf97 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf99 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf101 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf104 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf106 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf108 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf110 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf113 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf115 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf117 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf119 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf122 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf124 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf126 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf128 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [residual_58, residual_59, sum_30, residual_60, residual_61, sum_31, residual_62, residual_63, sum_32, residual_64, residual_65, sum_33, residual_66, residual_67, sum_34, residual_68, residual_69, sum_35, residual_70, residual_71, sum_36, residual_72, residual_73, sum_37, residual_74, residual_75, sum_38, residual_76, residual_77, sum_39, residual_78, residual_79, sum_40, residual_80, residual_81, sum_41, residual_82, residual_83, sum_42, residual_84, residual_85, sum_43, residual_86, residual_87, sum_44, residual_88, residual_89, sum_45, residual_90, residual_91, sum_46, residual_92, residual_93, sum_47, residual_94, residual_95, sum_48, residual_96, residual_97, sum_49, residual_98, residual_99, sum_50, residual_100, residual_101, sum_51, residual_102, residual_103, sum_52, residual_104, residual_105, sum_53, residual_106, residual_107, sum_54, residual_108, residual_109, sum_55, residual_110, residual_111, sum_56, residual_112, residual_113, sum_57], Original ATen: [aten.sub, aten.mul, aten.sum]
triton_per_fused_mul_sub_sum_2.run(primals_1, primals_3, buf0, buf1, buf2, buf67, buf69, buf71, buf73, buf76, buf78, buf80, buf82, buf85, buf87, buf89, buf91, buf94, buf96, buf98, buf100, buf103, buf105, buf107, buf109, buf112, buf114, buf116, buf118, buf121, buf123, buf125, buf127, buf68, buf70, buf72, buf74, buf77, buf79, buf81, buf83, buf86, buf88, buf90, buf92, buf95, buf97, buf99, buf101, buf104, buf106, buf108, buf110, buf113, buf115, buf117, buf119, buf122, buf124, buf126, buf128, 16, 16, grid=grid(16), stream=stream0)
buf130 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf132 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf134 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf136 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf139 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf141 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf143 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.float32)
buf131 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf133 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf135 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf137 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf140 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf142 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf144 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [residual_114, residual_115, sum_58, residual_116, residual_117, sum_59, residual_118, residual_119, sum_60, residual_120, residual_121, sum_61, residual_122, residual_123, sum_62, residual_124, residual_125, sum_63, residual_126, residual_127, sum_64], Original ATen: [aten.sub, aten.mul, aten.sum]
triton_per_fused_mul_sub_sum_3.run(primals_1, primals_3, buf0, buf1, buf2, buf130, buf132, buf134, buf136, buf139, buf141, buf143, buf131, buf133, buf135, buf137, buf140, buf142, buf144, 16, 16, grid=grid(16), stream=stream0)
buf12 = empty_strided_cuda((4, 64, 4), (256, 4, 1), torch.float32)
buf21 = buf12; del buf12 # reuse
buf30 = buf21; del buf21 # reuse
buf39 = buf30; del buf30 # reuse
buf48 = buf39; del buf39 # reuse
buf57 = buf48; del buf48 # reuse
buf66 = buf57; del buf57 # reuse
buf75 = buf66; del buf66 # reuse
buf84 = buf75; del buf75 # reuse
buf93 = buf84; del buf84 # reuse
buf102 = buf93; del buf93 # reuse
buf111 = buf102; del buf102 # reuse
buf120 = buf111; del buf111 # reuse
buf129 = buf120; del buf120 # reuse
buf138 = buf129; del buf129 # reuse
buf145 = buf138; del buf138 # reuse
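    # buf12 is allocated once as the (4, 64, 4) vlad tensor; the buf21..buf145
    # names above are aliases of it ("reuse"), so the copy kernel below
    # scatters all 64 per-cluster sums into a single buffer, replacing the 64
    # in-place setitem writes of the eager loop.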
# Topologically Sorted Source Nodes: [vlad, setitem, setitem_1, setitem_2, setitem_3, setitem_4, setitem_5, setitem_6, setitem_7, setitem_8, setitem_9, setitem_10, setitem_11, setitem_12, setitem_13, setitem_14, setitem_15, setitem_16, setitem_17, setitem_18, setitem_19, setitem_20, setitem_21, setitem_22, setitem_23, setitem_24, setitem_25, setitem_26, setitem_27, setitem_28, setitem_29, setitem_30, setitem_31, setitem_32, setitem_33, setitem_34, setitem_35, setitem_36, setitem_37, setitem_38, setitem_39, setitem_40, setitem_41, setitem_42, setitem_43, setitem_44, setitem_45, setitem_46, setitem_47, setitem_48, setitem_49, setitem_50, setitem_51, setitem_52, setitem_53, setitem_54, setitem_55, setitem_56, setitem_57, setitem_58, setitem_59, setitem_60, setitem_61, setitem_62, setitem_63], Original ATen: [aten.zeros, aten.copy]
triton_poi_fused_copy_zeros_4.run(buf145, buf11, buf9, buf7, buf5, buf3, buf20, buf18, buf16, buf14, buf29, buf27, buf25, buf23, buf38, buf36, buf34, buf32, buf47, buf45, buf43, buf41, buf56, buf54, buf52, buf50, buf65, buf63, buf61, buf59, buf74, buf72, buf70, buf68, buf83, buf81, buf79, buf77, buf92, buf90, buf88, buf86, buf101, buf99, buf97, buf95, buf110, buf108, buf106, buf104, buf119, buf117, buf115, buf113, buf128, buf126, buf124, buf122, buf137, buf135, buf133, buf131, buf144, buf142, buf140, 1024, grid=grid(1024), stream=stream0)
del buf101
del buf104
del buf106
del buf108
del buf11
del buf110
del buf113
del buf115
del buf117
del buf119
del buf122
del buf124
del buf126
del buf128
del buf131
del buf133
del buf135
del buf137
del buf14
del buf140
del buf142
del buf144
del buf16
del buf18
del buf20
del buf23
del buf25
del buf27
del buf29
del buf3
del buf32
del buf34
del buf36
del buf38
del buf41
del buf43
del buf45
del buf47
del buf5
del buf50
del buf52
del buf54
del buf56
del buf59
del buf61
del buf63
del buf65
del buf68
del buf7
del buf70
del buf72
del buf74
del buf77
del buf79
del buf81
del buf83
del buf86
del buf88
del buf9
del buf90
del buf92
del buf95
del buf97
del buf99
buf146 = empty_strided_cuda((4, 64, 4), (256, 4, 1), torch.float32)
buf147 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf148 = reinterpret_tensor(buf147, (4, 1), (1, 1), 0); del buf147 # reuse
buf149 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
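    # This reduction kernel fuses both normalizations from the eager code:
    # the per-cluster (intra) L2 normalization along dim=2 and, after
    # flattening to (4, 256), the global L2 normalization producing buf149.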
# Topologically Sorted Source Nodes: [vlad_1, vlad_3], Original ATen: [aten.div, aten.linalg_vector_norm]
triton_red_fused_div_linalg_vector_norm_5.run(buf148, buf145, buf146, buf149, 4, 256, grid=grid(4), stream=stream0)
del buf146
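    # buf149 holds the final (4, 256) VLAD descriptor; the remaining tensors
    # returned below (inputs, residuals, softmax statistics, norms) are kept
    # alive for the backward pass.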
return (buf149, primals_1, primals_2, buf0, buf1, buf2, reinterpret_tensor(primals_3, (1, 4), (4, 1), 0), buf4, buf6, buf8, buf10, buf13, buf15, buf17, buf19, buf22, buf24, buf26, buf28, buf31, buf33, buf35, buf37, buf40, buf42, buf44, buf46, buf49, buf51, buf53, buf55, buf58, buf60, buf62, buf64, buf67, buf69, buf71, buf73, buf76, buf78, buf80, buf82, buf85, buf87, buf89, buf91, buf94, buf96, buf98, buf100, buf103, buf105, buf107, buf109, buf112, buf114, buf116, buf118, buf121, buf123, buf125, buf127, buf130, buf132, buf134, buf136, buf139, buf141, buf143, buf145, buf148, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
class NetVLAD(nn.Module):
"""NetVLAD layer implementation"""
def __init__(self, dim, num_clusters=64):
"""
Args:
dim : int
Dimension of descriptors
num_clusters : int
The number of clusters
"""
super(NetVLAD, self).__init__()
self.num_clusters = num_clusters
        self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1),
            bias=False)
self.centroids = nn.Parameter(torch.rand(num_clusters, dim))
def init_params(self, clsts, traindescs):
clsts_assign = clsts / np.linalg.norm(clsts, axis=1, keepdims=True)
dots = np.dot(clsts_assign, traindescs.T)
dots.sort(0)
dots = dots[::-1, :]
alpha = (-np.log(0.01) / np.mean(dots[0, :] - dots[1, :])).item()
self.centroids = nn.Parameter(torch.from_numpy(clsts))
        self.conv.weight = nn.Parameter(
            torch.from_numpy(alpha * clsts_assign).unsqueeze(2).unsqueeze(3))
self.conv.bias = None
def forward(self, x, crm=None):
N, C = x.shape[:2]
soft_assign = self.conv(x).view(N, self.num_clusters, -1)
soft_assign = F.softmax(soft_assign, dim=1)
if crm is not None:
            assert (crm.shape[0] == N and crm.shape[1] == 1 and
                crm.shape[2:] == x.shape[2:])
soft_assign = torch.mul(soft_assign, crm.view(N, 1, -1))
x_flatten = x.view(N, C, -1)
        vlad = torch.zeros((N, self.num_clusters, C), dtype=x.dtype,
            layout=x.layout, device=x.device)
for c in range(self.num_clusters):
            residual = (x_flatten.unsqueeze(0).permute(1, 0, 2, 3) -
                self.centroids[c:c + 1, :].expand(x_flatten.size(-1), -1, -1)
                .permute(1, 2, 0).unsqueeze(0))
residual *= soft_assign[:, c:c + 1, :].unsqueeze(2)
vlad[:, c:c + 1, :] = residual.sum(dim=-1)
vlad = F.normalize(vlad, p=2, dim=2)
vlad = vlad.view(N, -1)
vlad = F.normalize(vlad, p=2, dim=1)
return vlad
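# Illustrative usage sketch (added for clarity, not part of the original
# module): run the eager NetVLAD layer on a random batch and check the
# contract that the compiled kernels above implement: one L2-normalized
# descriptor of length num_clusters * dim per input. The function name is
# ours; nothing below is required by the harness.
def example_usage():
    layer = NetVLAD(dim=4, num_clusters=64)
    x = torch.rand(4, 4, 4, 4)  # (N, C, H, W), matching get_inputs()
    out = layer(x)  # shape (4, 64 * 4) == (4, 256)
    assert out.shape == (4, 256)
    # every row has unit L2 norm after the final F.normalize
    assert torch.allclose(out.norm(p=2, dim=1), torch.ones(4), atol=1e-5)
    return out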
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 1024 * x1), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp10, xmask)
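# The kernel above reduces over the 64 cluster logits for each of the
# 4 * 16 = 64 (batch, spatial) locations: out_ptr0 receives the per-location
# max and out_ptr1 the sum of exp(logit - max), the two statistics of a
# numerically stable softmax. The fused kernels below recompute
# exp(logit - max) / sum on the fly instead of materializing the softmax.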
@triton.jit
def triton_per_fused_mul_sub_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5,
out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12,
out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18,
out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24,
out_ptr25, out_ptr26, out_ptr27, out_ptr28, out_ptr29, out_ptr30,
out_ptr31, out_ptr32, out_ptr33, out_ptr34, out_ptr35, out_ptr36,
out_ptr37, out_ptr38, out_ptr39, out_ptr40, out_ptr41, out_ptr42,
out_ptr43, out_ptr44, out_ptr45, out_ptr46, out_ptr47, out_ptr48,
out_ptr49, out_ptr50, out_ptr51, out_ptr52, out_ptr53, out_ptr54,
out_ptr55, out_ptr56, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (20 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (24 + x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (28 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (32 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (36 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (40 + x0), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (44 + x0), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr1 + (48 + x0), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (52 + x0), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (56 + x0), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (60 + x0), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (64 + x0), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr1 + (68 + x0), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr1 + (72 + x0), xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr1 + (76 + x0), xmask, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr1 + (80 + x0), xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr1 + (84 + x0), xmask, eviction_policy='evict_last')
tmp43 = tl.load(in_ptr1 + (88 + x0), xmask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr1 + (92 + x0), xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr1 + (96 + x0), xmask, eviction_policy='evict_last')
tmp49 = tl.load(in_ptr1 + (100 + x0), xmask, eviction_policy='evict_last')
tmp51 = tl.load(in_ptr1 + (104 + x0), xmask, eviction_policy='evict_last')
tmp53 = tl.load(in_ptr1 + (108 + x0), xmask, eviction_policy='evict_last')
tmp55 = tl.load(in_ptr1 + (112 + x0), xmask, eviction_policy='evict_last')
tmp57 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp59 = tl.load(in_ptr2 + (r2 + 1024 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp60 = tl.load(in_ptr3 + (r2 + 16 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp63 = tl.load(in_ptr4 + (r2 + 16 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp70 = tl.load(in_ptr2 + (16 + r2 + 1024 * x1), xmask, eviction_policy
='evict_last', other=0.0)
tmp79 = tl.load(in_ptr2 + (32 + r2 + 1024 * x1), xmask, eviction_policy
='evict_last', other=0.0)
tmp88 = tl.load(in_ptr2 + (48 + r2 + 1024 * x1), xmask, eviction_policy
='evict_last', other=0.0)
tmp97 = tl.load(in_ptr2 + (64 + r2 + 1024 * x1), xmask, eviction_policy
='evict_last', other=0.0)
tmp106 = tl.load(in_ptr2 + (80 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp115 = tl.load(in_ptr2 + (96 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp124 = tl.load(in_ptr2 + (112 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp133 = tl.load(in_ptr2 + (128 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp142 = tl.load(in_ptr2 + (144 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp151 = tl.load(in_ptr2 + (160 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp160 = tl.load(in_ptr2 + (176 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp169 = tl.load(in_ptr2 + (192 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp178 = tl.load(in_ptr2 + (208 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp187 = tl.load(in_ptr2 + (224 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp196 = tl.load(in_ptr2 + (240 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp205 = tl.load(in_ptr2 + (256 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp214 = tl.load(in_ptr2 + (272 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp223 = tl.load(in_ptr2 + (288 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp232 = tl.load(in_ptr2 + (304 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp241 = tl.load(in_ptr2 + (320 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp250 = tl.load(in_ptr2 + (336 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp259 = tl.load(in_ptr2 + (352 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp268 = tl.load(in_ptr2 + (368 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp277 = tl.load(in_ptr2 + (384 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp286 = tl.load(in_ptr2 + (400 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp295 = tl.load(in_ptr2 + (416 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp304 = tl.load(in_ptr2 + (432 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp313 = tl.load(in_ptr2 + (448 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 - tmp1
tmp4 = tmp0 - tmp3
tmp6 = tmp0 - tmp5
tmp8 = tmp0 - tmp7
tmp10 = tmp0 - tmp9
tmp12 = tmp0 - tmp11
tmp14 = tmp0 - tmp13
tmp16 = tmp0 - tmp15
tmp18 = tmp0 - tmp17
tmp20 = tmp0 - tmp19
tmp22 = tmp0 - tmp21
tmp24 = tmp0 - tmp23
tmp26 = tmp0 - tmp25
tmp28 = tmp0 - tmp27
tmp30 = tmp0 - tmp29
tmp32 = tmp0 - tmp31
tmp34 = tmp0 - tmp33
tmp36 = tmp0 - tmp35
tmp38 = tmp0 - tmp37
tmp40 = tmp0 - tmp39
tmp42 = tmp0 - tmp41
tmp44 = tmp0 - tmp43
tmp46 = tmp0 - tmp45
tmp48 = tmp0 - tmp47
tmp50 = tmp0 - tmp49
tmp52 = tmp0 - tmp51
tmp54 = tmp0 - tmp53
tmp56 = tmp0 - tmp55
tmp58 = tmp0 - tmp57
tmp61 = tmp59 - tmp60
tmp62 = tl_math.exp(tmp61)
tmp64 = tmp62 / tmp63
tmp65 = tmp58 * tmp64
tmp66 = tl.broadcast_to(tmp65, [XBLOCK, RBLOCK])
tmp68 = tl.where(xmask, tmp66, 0)
tmp69 = tl.sum(tmp68, 1)[:, None]
tmp71 = tmp70 - tmp60
tmp72 = tl_math.exp(tmp71)
tmp73 = tmp72 / tmp63
tmp74 = tmp2 * tmp73
tmp75 = tl.broadcast_to(tmp74, [XBLOCK, RBLOCK])
tmp77 = tl.where(xmask, tmp75, 0)
tmp78 = tl.sum(tmp77, 1)[:, None]
tmp80 = tmp79 - tmp60
tmp81 = tl_math.exp(tmp80)
tmp82 = tmp81 / tmp63
tmp83 = tmp4 * tmp82
tmp84 = tl.broadcast_to(tmp83, [XBLOCK, RBLOCK])
tmp86 = tl.where(xmask, tmp84, 0)
tmp87 = tl.sum(tmp86, 1)[:, None]
tmp89 = tmp88 - tmp60
tmp90 = tl_math.exp(tmp89)
tmp91 = tmp90 / tmp63
tmp92 = tmp6 * tmp91
tmp93 = tl.broadcast_to(tmp92, [XBLOCK, RBLOCK])
tmp95 = tl.where(xmask, tmp93, 0)
tmp96 = tl.sum(tmp95, 1)[:, None]
tmp98 = tmp97 - tmp60
tmp99 = tl_math.exp(tmp98)
tmp100 = tmp99 / tmp63
tmp101 = tmp8 * tmp100
tmp102 = tl.broadcast_to(tmp101, [XBLOCK, RBLOCK])
tmp104 = tl.where(xmask, tmp102, 0)
tmp105 = tl.sum(tmp104, 1)[:, None]
tmp107 = tmp106 - tmp60
tmp108 = tl_math.exp(tmp107)
tmp109 = tmp108 / tmp63
tmp110 = tmp10 * tmp109
tmp111 = tl.broadcast_to(tmp110, [XBLOCK, RBLOCK])
tmp113 = tl.where(xmask, tmp111, 0)
tmp114 = tl.sum(tmp113, 1)[:, None]
tmp116 = tmp115 - tmp60
tmp117 = tl_math.exp(tmp116)
tmp118 = tmp117 / tmp63
tmp119 = tmp12 * tmp118
tmp120 = tl.broadcast_to(tmp119, [XBLOCK, RBLOCK])
tmp122 = tl.where(xmask, tmp120, 0)
tmp123 = tl.sum(tmp122, 1)[:, None]
tmp125 = tmp124 - tmp60
tmp126 = tl_math.exp(tmp125)
tmp127 = tmp126 / tmp63
tmp128 = tmp14 * tmp127
tmp129 = tl.broadcast_to(tmp128, [XBLOCK, RBLOCK])
tmp131 = tl.where(xmask, tmp129, 0)
tmp132 = tl.sum(tmp131, 1)[:, None]
tmp134 = tmp133 - tmp60
tmp135 = tl_math.exp(tmp134)
tmp136 = tmp135 / tmp63
tmp137 = tmp16 * tmp136
tmp138 = tl.broadcast_to(tmp137, [XBLOCK, RBLOCK])
tmp140 = tl.where(xmask, tmp138, 0)
tmp141 = tl.sum(tmp140, 1)[:, None]
tmp143 = tmp142 - tmp60
tmp144 = tl_math.exp(tmp143)
tmp145 = tmp144 / tmp63
tmp146 = tmp18 * tmp145
tmp147 = tl.broadcast_to(tmp146, [XBLOCK, RBLOCK])
tmp149 = tl.where(xmask, tmp147, 0)
tmp150 = tl.sum(tmp149, 1)[:, None]
tmp152 = tmp151 - tmp60
tmp153 = tl_math.exp(tmp152)
tmp154 = tmp153 / tmp63
tmp155 = tmp20 * tmp154
tmp156 = tl.broadcast_to(tmp155, [XBLOCK, RBLOCK])
tmp158 = tl.where(xmask, tmp156, 0)
tmp159 = tl.sum(tmp158, 1)[:, None]
tmp161 = tmp160 - tmp60
tmp162 = tl_math.exp(tmp161)
tmp163 = tmp162 / tmp63
tmp164 = tmp22 * tmp163
tmp165 = tl.broadcast_to(tmp164, [XBLOCK, RBLOCK])
tmp167 = tl.where(xmask, tmp165, 0)
tmp168 = tl.sum(tmp167, 1)[:, None]
tmp170 = tmp169 - tmp60
tmp171 = tl_math.exp(tmp170)
tmp172 = tmp171 / tmp63
tmp173 = tmp24 * tmp172
tmp174 = tl.broadcast_to(tmp173, [XBLOCK, RBLOCK])
tmp176 = tl.where(xmask, tmp174, 0)
tmp177 = tl.sum(tmp176, 1)[:, None]
tmp179 = tmp178 - tmp60
tmp180 = tl_math.exp(tmp179)
tmp181 = tmp180 / tmp63
tmp182 = tmp26 * tmp181
tmp183 = tl.broadcast_to(tmp182, [XBLOCK, RBLOCK])
tmp185 = tl.where(xmask, tmp183, 0)
tmp186 = tl.sum(tmp185, 1)[:, None]
tmp188 = tmp187 - tmp60
tmp189 = tl_math.exp(tmp188)
tmp190 = tmp189 / tmp63
tmp191 = tmp28 * tmp190
tmp192 = tl.broadcast_to(tmp191, [XBLOCK, RBLOCK])
tmp194 = tl.where(xmask, tmp192, 0)
tmp195 = tl.sum(tmp194, 1)[:, None]
tmp197 = tmp196 - tmp60
tmp198 = tl_math.exp(tmp197)
tmp199 = tmp198 / tmp63
tmp200 = tmp30 * tmp199
tmp201 = tl.broadcast_to(tmp200, [XBLOCK, RBLOCK])
tmp203 = tl.where(xmask, tmp201, 0)
tmp204 = tl.sum(tmp203, 1)[:, None]
tmp206 = tmp205 - tmp60
tmp207 = tl_math.exp(tmp206)
tmp208 = tmp207 / tmp63
tmp209 = tmp32 * tmp208
tmp210 = tl.broadcast_to(tmp209, [XBLOCK, RBLOCK])
tmp212 = tl.where(xmask, tmp210, 0)
tmp213 = tl.sum(tmp212, 1)[:, None]
tmp215 = tmp214 - tmp60
tmp216 = tl_math.exp(tmp215)
tmp217 = tmp216 / tmp63
tmp218 = tmp34 * tmp217
tmp219 = tl.broadcast_to(tmp218, [XBLOCK, RBLOCK])
tmp221 = tl.where(xmask, tmp219, 0)
tmp222 = tl.sum(tmp221, 1)[:, None]
tmp224 = tmp223 - tmp60
tmp225 = tl_math.exp(tmp224)
tmp226 = tmp225 / tmp63
tmp227 = tmp36 * tmp226
tmp228 = tl.broadcast_to(tmp227, [XBLOCK, RBLOCK])
tmp230 = tl.where(xmask, tmp228, 0)
tmp231 = tl.sum(tmp230, 1)[:, None]
tmp233 = tmp232 - tmp60
tmp234 = tl_math.exp(tmp233)
tmp235 = tmp234 / tmp63
tmp236 = tmp38 * tmp235
tmp237 = tl.broadcast_to(tmp236, [XBLOCK, RBLOCK])
tmp239 = tl.where(xmask, tmp237, 0)
tmp240 = tl.sum(tmp239, 1)[:, None]
tmp242 = tmp241 - tmp60
tmp243 = tl_math.exp(tmp242)
tmp244 = tmp243 / tmp63
tmp245 = tmp40 * tmp244
tmp246 = tl.broadcast_to(tmp245, [XBLOCK, RBLOCK])
tmp248 = tl.where(xmask, tmp246, 0)
tmp249 = tl.sum(tmp248, 1)[:, None]
tmp251 = tmp250 - tmp60
tmp252 = tl_math.exp(tmp251)
tmp253 = tmp252 / tmp63
tmp254 = tmp42 * tmp253
tmp255 = tl.broadcast_to(tmp254, [XBLOCK, RBLOCK])
tmp257 = tl.where(xmask, tmp255, 0)
tmp258 = tl.sum(tmp257, 1)[:, None]
tmp260 = tmp259 - tmp60
tmp261 = tl_math.exp(tmp260)
tmp262 = tmp261 / tmp63
tmp263 = tmp44 * tmp262
tmp264 = tl.broadcast_to(tmp263, [XBLOCK, RBLOCK])
tmp266 = tl.where(xmask, tmp264, 0)
tmp267 = tl.sum(tmp266, 1)[:, None]
tmp269 = tmp268 - tmp60
tmp270 = tl_math.exp(tmp269)
tmp271 = tmp270 / tmp63
tmp272 = tmp46 * tmp271
tmp273 = tl.broadcast_to(tmp272, [XBLOCK, RBLOCK])
tmp275 = tl.where(xmask, tmp273, 0)
tmp276 = tl.sum(tmp275, 1)[:, None]
tmp278 = tmp277 - tmp60
tmp279 = tl_math.exp(tmp278)
tmp280 = tmp279 / tmp63
tmp281 = tmp48 * tmp280
tmp282 = tl.broadcast_to(tmp281, [XBLOCK, RBLOCK])
tmp284 = tl.where(xmask, tmp282, 0)
tmp285 = tl.sum(tmp284, 1)[:, None]
tmp287 = tmp286 - tmp60
tmp288 = tl_math.exp(tmp287)
tmp289 = tmp288 / tmp63
tmp290 = tmp50 * tmp289
tmp291 = tl.broadcast_to(tmp290, [XBLOCK, RBLOCK])
tmp293 = tl.where(xmask, tmp291, 0)
tmp294 = tl.sum(tmp293, 1)[:, None]
tmp296 = tmp295 - tmp60
tmp297 = tl_math.exp(tmp296)
tmp298 = tmp297 / tmp63
tmp299 = tmp52 * tmp298
tmp300 = tl.broadcast_to(tmp299, [XBLOCK, RBLOCK])
tmp302 = tl.where(xmask, tmp300, 0)
tmp303 = tl.sum(tmp302, 1)[:, None]
tmp305 = tmp304 - tmp60
tmp306 = tl_math.exp(tmp305)
tmp307 = tmp306 / tmp63
tmp308 = tmp54 * tmp307
tmp309 = tl.broadcast_to(tmp308, [XBLOCK, RBLOCK])
tmp311 = tl.where(xmask, tmp309, 0)
tmp312 = tl.sum(tmp311, 1)[:, None]
tmp314 = tmp313 - tmp60
tmp315 = tl_math.exp(tmp314)
tmp316 = tmp315 / tmp63
tmp317 = tmp56 * tmp316
tmp318 = tl.broadcast_to(tmp317, [XBLOCK, RBLOCK])
tmp320 = tl.where(xmask, tmp318, 0)
tmp321 = tl.sum(tmp320, 1)[:, None]
tl.store(out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.store(out_ptr1 + (r2 + 16 * x3), tmp4, xmask)
tl.store(out_ptr2 + (r2 + 16 * x3), tmp6, xmask)
tl.store(out_ptr3 + (r2 + 16 * x3), tmp8, xmask)
tl.store(out_ptr4 + (r2 + 16 * x3), tmp10, xmask)
tl.store(out_ptr5 + (r2 + 16 * x3), tmp12, xmask)
tl.store(out_ptr6 + (r2 + 16 * x3), tmp14, xmask)
tl.store(out_ptr7 + (r2 + 16 * x3), tmp16, xmask)
tl.store(out_ptr8 + (r2 + 16 * x3), tmp18, xmask)
tl.store(out_ptr9 + (r2 + 16 * x3), tmp20, xmask)
tl.store(out_ptr10 + (r2 + 16 * x3), tmp22, xmask)
tl.store(out_ptr11 + (r2 + 16 * x3), tmp24, xmask)
tl.store(out_ptr12 + (r2 + 16 * x3), tmp26, xmask)
tl.store(out_ptr13 + (r2 + 16 * x3), tmp28, xmask)
tl.store(out_ptr14 + (r2 + 16 * x3), tmp30, xmask)
tl.store(out_ptr15 + (r2 + 16 * x3), tmp32, xmask)
tl.store(out_ptr16 + (r2 + 16 * x3), tmp34, xmask)
tl.store(out_ptr17 + (r2 + 16 * x3), tmp36, xmask)
tl.store(out_ptr18 + (r2 + 16 * x3), tmp38, xmask)
tl.store(out_ptr19 + (r2 + 16 * x3), tmp40, xmask)
tl.store(out_ptr20 + (r2 + 16 * x3), tmp42, xmask)
tl.store(out_ptr21 + (r2 + 16 * x3), tmp44, xmask)
tl.store(out_ptr22 + (r2 + 16 * x3), tmp46, xmask)
tl.store(out_ptr23 + (r2 + 16 * x3), tmp48, xmask)
tl.store(out_ptr24 + (r2 + 16 * x3), tmp50, xmask)
tl.store(out_ptr25 + (r2 + 16 * x3), tmp52, xmask)
tl.store(out_ptr26 + (r2 + 16 * x3), tmp54, xmask)
tl.store(out_ptr27 + (r2 + 16 * x3), tmp56, xmask)
tl.store(out_ptr28 + x3, tmp69, xmask)
tl.store(out_ptr29 + x3, tmp78, xmask)
tl.store(out_ptr30 + x3, tmp87, xmask)
tl.store(out_ptr31 + x3, tmp96, xmask)
tl.store(out_ptr32 + x3, tmp105, xmask)
tl.store(out_ptr33 + x3, tmp114, xmask)
tl.store(out_ptr34 + x3, tmp123, xmask)
tl.store(out_ptr35 + x3, tmp132, xmask)
tl.store(out_ptr36 + x3, tmp141, xmask)
tl.store(out_ptr37 + x3, tmp150, xmask)
tl.store(out_ptr38 + x3, tmp159, xmask)
tl.store(out_ptr39 + x3, tmp168, xmask)
tl.store(out_ptr40 + x3, tmp177, xmask)
tl.store(out_ptr41 + x3, tmp186, xmask)
tl.store(out_ptr42 + x3, tmp195, xmask)
tl.store(out_ptr43 + x3, tmp204, xmask)
tl.store(out_ptr44 + x3, tmp213, xmask)
tl.store(out_ptr45 + x3, tmp222, xmask)
tl.store(out_ptr46 + x3, tmp231, xmask)
tl.store(out_ptr47 + x3, tmp240, xmask)
tl.store(out_ptr48 + x3, tmp249, xmask)
tl.store(out_ptr49 + x3, tmp258, xmask)
tl.store(out_ptr50 + x3, tmp267, xmask)
tl.store(out_ptr51 + x3, tmp276, xmask)
tl.store(out_ptr52 + x3, tmp285, xmask)
tl.store(out_ptr53 + x3, tmp294, xmask)
tl.store(out_ptr54 + x3, tmp303, xmask)
tl.store(out_ptr55 + x3, tmp312, xmask)
tl.store(out_ptr56 + x3, tmp321, xmask)
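# triton_per_fused_mul_sub_sum_1 fuses the first 29 iterations of the eager
# cluster loop (clusters 0-28): for each cluster it forms the residual
# x - centroid_c, weights it by the recomputed soft assignment, and reduces
# over the 16 spatial positions in one pass, also writing the unscaled
# residuals out so the backward graph can reuse them.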
@triton.jit
def triton_per_fused_mul_sub_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5,
out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12,
out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18,
out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24,
out_ptr25, out_ptr26, out_ptr27, out_ptr28, out_ptr29, out_ptr30,
out_ptr31, out_ptr32, out_ptr33, out_ptr34, out_ptr35, out_ptr36,
out_ptr37, out_ptr38, out_ptr39, out_ptr40, out_ptr41, out_ptr42,
out_ptr43, out_ptr44, out_ptr45, out_ptr46, out_ptr47, out_ptr48,
out_ptr49, out_ptr50, out_ptr51, out_ptr52, out_ptr53, out_ptr54,
out_ptr55, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (116 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (120 + x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (124 + x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (128 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (132 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (136 + x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (140 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (144 + x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (148 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (152 + x0), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (156 + x0), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr1 + (160 + x0), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (164 + x0), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (168 + x0), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (172 + x0), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (176 + x0), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr1 + (180 + x0), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr1 + (184 + x0), xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr1 + (188 + x0), xmask, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr1 + (192 + x0), xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr1 + (196 + x0), xmask, eviction_policy='evict_last')
tmp43 = tl.load(in_ptr1 + (200 + x0), xmask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr1 + (204 + x0), xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr1 + (208 + x0), xmask, eviction_policy='evict_last')
tmp49 = tl.load(in_ptr1 + (212 + x0), xmask, eviction_policy='evict_last')
tmp51 = tl.load(in_ptr1 + (216 + x0), xmask, eviction_policy='evict_last')
tmp53 = tl.load(in_ptr1 + (220 + x0), xmask, eviction_policy='evict_last')
tmp55 = tl.load(in_ptr1 + (224 + x0), xmask, eviction_policy='evict_last')
tmp57 = tl.load(in_ptr2 + (464 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp58 = tl.load(in_ptr3 + (r2 + 16 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp61 = tl.load(in_ptr4 + (r2 + 16 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp68 = tl.load(in_ptr2 + (480 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp77 = tl.load(in_ptr2 + (496 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp86 = tl.load(in_ptr2 + (512 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp95 = tl.load(in_ptr2 + (528 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp104 = tl.load(in_ptr2 + (544 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp113 = tl.load(in_ptr2 + (560 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp122 = tl.load(in_ptr2 + (576 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp131 = tl.load(in_ptr2 + (592 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp140 = tl.load(in_ptr2 + (608 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp149 = tl.load(in_ptr2 + (624 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp158 = tl.load(in_ptr2 + (640 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp167 = tl.load(in_ptr2 + (656 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp176 = tl.load(in_ptr2 + (672 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp185 = tl.load(in_ptr2 + (688 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp194 = tl.load(in_ptr2 + (704 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp203 = tl.load(in_ptr2 + (720 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp212 = tl.load(in_ptr2 + (736 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp221 = tl.load(in_ptr2 + (752 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp230 = tl.load(in_ptr2 + (768 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp239 = tl.load(in_ptr2 + (784 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp248 = tl.load(in_ptr2 + (800 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp257 = tl.load(in_ptr2 + (816 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp266 = tl.load(in_ptr2 + (832 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp275 = tl.load(in_ptr2 + (848 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp284 = tl.load(in_ptr2 + (864 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp293 = tl.load(in_ptr2 + (880 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp302 = tl.load(in_ptr2 + (896 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 - tmp1
tmp4 = tmp0 - tmp3
tmp6 = tmp0 - tmp5
tmp8 = tmp0 - tmp7
tmp10 = tmp0 - tmp9
tmp12 = tmp0 - tmp11
tmp14 = tmp0 - tmp13
tmp16 = tmp0 - tmp15
tmp18 = tmp0 - tmp17
tmp20 = tmp0 - tmp19
tmp22 = tmp0 - tmp21
tmp24 = tmp0 - tmp23
tmp26 = tmp0 - tmp25
tmp28 = tmp0 - tmp27
tmp30 = tmp0 - tmp29
tmp32 = tmp0 - tmp31
tmp34 = tmp0 - tmp33
tmp36 = tmp0 - tmp35
tmp38 = tmp0 - tmp37
tmp40 = tmp0 - tmp39
tmp42 = tmp0 - tmp41
tmp44 = tmp0 - tmp43
tmp46 = tmp0 - tmp45
tmp48 = tmp0 - tmp47
tmp50 = tmp0 - tmp49
tmp52 = tmp0 - tmp51
tmp54 = tmp0 - tmp53
tmp56 = tmp0 - tmp55
tmp59 = tmp57 - tmp58
tmp60 = tl_math.exp(tmp59)
tmp62 = tmp60 / tmp61
tmp63 = tmp2 * tmp62
tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK])
tmp66 = tl.where(xmask, tmp64, 0)
tmp67 = tl.sum(tmp66, 1)[:, None]
tmp69 = tmp68 - tmp58
tmp70 = tl_math.exp(tmp69)
tmp71 = tmp70 / tmp61
tmp72 = tmp4 * tmp71
tmp73 = tl.broadcast_to(tmp72, [XBLOCK, RBLOCK])
tmp75 = tl.where(xmask, tmp73, 0)
tmp76 = tl.sum(tmp75, 1)[:, None]
tmp78 = tmp77 - tmp58
tmp79 = tl_math.exp(tmp78)
tmp80 = tmp79 / tmp61
tmp81 = tmp6 * tmp80
tmp82 = tl.broadcast_to(tmp81, [XBLOCK, RBLOCK])
tmp84 = tl.where(xmask, tmp82, 0)
tmp85 = tl.sum(tmp84, 1)[:, None]
tmp87 = tmp86 - tmp58
tmp88 = tl_math.exp(tmp87)
tmp89 = tmp88 / tmp61
tmp90 = tmp8 * tmp89
tmp91 = tl.broadcast_to(tmp90, [XBLOCK, RBLOCK])
tmp93 = tl.where(xmask, tmp91, 0)
tmp94 = tl.sum(tmp93, 1)[:, None]
tmp96 = tmp95 - tmp58
tmp97 = tl_math.exp(tmp96)
tmp98 = tmp97 / tmp61
tmp99 = tmp10 * tmp98
tmp100 = tl.broadcast_to(tmp99, [XBLOCK, RBLOCK])
tmp102 = tl.where(xmask, tmp100, 0)
tmp103 = tl.sum(tmp102, 1)[:, None]
tmp105 = tmp104 - tmp58
tmp106 = tl_math.exp(tmp105)
tmp107 = tmp106 / tmp61
tmp108 = tmp12 * tmp107
tmp109 = tl.broadcast_to(tmp108, [XBLOCK, RBLOCK])
tmp111 = tl.where(xmask, tmp109, 0)
tmp112 = tl.sum(tmp111, 1)[:, None]
tmp114 = tmp113 - tmp58
tmp115 = tl_math.exp(tmp114)
tmp116 = tmp115 / tmp61
tmp117 = tmp14 * tmp116
tmp118 = tl.broadcast_to(tmp117, [XBLOCK, RBLOCK])
tmp120 = tl.where(xmask, tmp118, 0)
tmp121 = tl.sum(tmp120, 1)[:, None]
tmp123 = tmp122 - tmp58
tmp124 = tl_math.exp(tmp123)
tmp125 = tmp124 / tmp61
tmp126 = tmp16 * tmp125
tmp127 = tl.broadcast_to(tmp126, [XBLOCK, RBLOCK])
tmp129 = tl.where(xmask, tmp127, 0)
tmp130 = tl.sum(tmp129, 1)[:, None]
tmp132 = tmp131 - tmp58
tmp133 = tl_math.exp(tmp132)
tmp134 = tmp133 / tmp61
tmp135 = tmp18 * tmp134
tmp136 = tl.broadcast_to(tmp135, [XBLOCK, RBLOCK])
tmp138 = tl.where(xmask, tmp136, 0)
tmp139 = tl.sum(tmp138, 1)[:, None]
tmp141 = tmp140 - tmp58
tmp142 = tl_math.exp(tmp141)
tmp143 = tmp142 / tmp61
tmp144 = tmp20 * tmp143
tmp145 = tl.broadcast_to(tmp144, [XBLOCK, RBLOCK])
tmp147 = tl.where(xmask, tmp145, 0)
tmp148 = tl.sum(tmp147, 1)[:, None]
tmp150 = tmp149 - tmp58
tmp151 = tl_math.exp(tmp150)
tmp152 = tmp151 / tmp61
tmp153 = tmp22 * tmp152
tmp154 = tl.broadcast_to(tmp153, [XBLOCK, RBLOCK])
tmp156 = tl.where(xmask, tmp154, 0)
tmp157 = tl.sum(tmp156, 1)[:, None]
tmp159 = tmp158 - tmp58
tmp160 = tl_math.exp(tmp159)
tmp161 = tmp160 / tmp61
tmp162 = tmp24 * tmp161
tmp163 = tl.broadcast_to(tmp162, [XBLOCK, RBLOCK])
tmp165 = tl.where(xmask, tmp163, 0)
tmp166 = tl.sum(tmp165, 1)[:, None]
tmp168 = tmp167 - tmp58
tmp169 = tl_math.exp(tmp168)
tmp170 = tmp169 / tmp61
tmp171 = tmp26 * tmp170
tmp172 = tl.broadcast_to(tmp171, [XBLOCK, RBLOCK])
tmp174 = tl.where(xmask, tmp172, 0)
tmp175 = tl.sum(tmp174, 1)[:, None]
tmp177 = tmp176 - tmp58
tmp178 = tl_math.exp(tmp177)
tmp179 = tmp178 / tmp61
tmp180 = tmp28 * tmp179
tmp181 = tl.broadcast_to(tmp180, [XBLOCK, RBLOCK])
tmp183 = tl.where(xmask, tmp181, 0)
tmp184 = tl.sum(tmp183, 1)[:, None]
tmp186 = tmp185 - tmp58
tmp187 = tl_math.exp(tmp186)
tmp188 = tmp187 / tmp61
tmp189 = tmp30 * tmp188
tmp190 = tl.broadcast_to(tmp189, [XBLOCK, RBLOCK])
tmp192 = tl.where(xmask, tmp190, 0)
tmp193 = tl.sum(tmp192, 1)[:, None]
tmp195 = tmp194 - tmp58
tmp196 = tl_math.exp(tmp195)
tmp197 = tmp196 / tmp61
tmp198 = tmp32 * tmp197
tmp199 = tl.broadcast_to(tmp198, [XBLOCK, RBLOCK])
tmp201 = tl.where(xmask, tmp199, 0)
tmp202 = tl.sum(tmp201, 1)[:, None]
tmp204 = tmp203 - tmp58
tmp205 = tl_math.exp(tmp204)
tmp206 = tmp205 / tmp61
tmp207 = tmp34 * tmp206
tmp208 = tl.broadcast_to(tmp207, [XBLOCK, RBLOCK])
tmp210 = tl.where(xmask, tmp208, 0)
tmp211 = tl.sum(tmp210, 1)[:, None]
tmp213 = tmp212 - tmp58
tmp214 = tl_math.exp(tmp213)
tmp215 = tmp214 / tmp61
tmp216 = tmp36 * tmp215
tmp217 = tl.broadcast_to(tmp216, [XBLOCK, RBLOCK])
tmp219 = tl.where(xmask, tmp217, 0)
tmp220 = tl.sum(tmp219, 1)[:, None]
tmp222 = tmp221 - tmp58
tmp223 = tl_math.exp(tmp222)
tmp224 = tmp223 / tmp61
tmp225 = tmp38 * tmp224
tmp226 = tl.broadcast_to(tmp225, [XBLOCK, RBLOCK])
tmp228 = tl.where(xmask, tmp226, 0)
tmp229 = tl.sum(tmp228, 1)[:, None]
tmp231 = tmp230 - tmp58
tmp232 = tl_math.exp(tmp231)
tmp233 = tmp232 / tmp61
tmp234 = tmp40 * tmp233
tmp235 = tl.broadcast_to(tmp234, [XBLOCK, RBLOCK])
tmp237 = tl.where(xmask, tmp235, 0)
tmp238 = tl.sum(tmp237, 1)[:, None]
tmp240 = tmp239 - tmp58
tmp241 = tl_math.exp(tmp240)
tmp242 = tmp241 / tmp61
tmp243 = tmp42 * tmp242
tmp244 = tl.broadcast_to(tmp243, [XBLOCK, RBLOCK])
tmp246 = tl.where(xmask, tmp244, 0)
tmp247 = tl.sum(tmp246, 1)[:, None]
tmp249 = tmp248 - tmp58
tmp250 = tl_math.exp(tmp249)
tmp251 = tmp250 / tmp61
tmp252 = tmp44 * tmp251
tmp253 = tl.broadcast_to(tmp252, [XBLOCK, RBLOCK])
tmp255 = tl.where(xmask, tmp253, 0)
tmp256 = tl.sum(tmp255, 1)[:, None]
tmp258 = tmp257 - tmp58
tmp259 = tl_math.exp(tmp258)
tmp260 = tmp259 / tmp61
tmp261 = tmp46 * tmp260
tmp262 = tl.broadcast_to(tmp261, [XBLOCK, RBLOCK])
tmp264 = tl.where(xmask, tmp262, 0)
tmp265 = tl.sum(tmp264, 1)[:, None]
tmp267 = tmp266 - tmp58
tmp268 = tl_math.exp(tmp267)
tmp269 = tmp268 / tmp61
tmp270 = tmp48 * tmp269
tmp271 = tl.broadcast_to(tmp270, [XBLOCK, RBLOCK])
tmp273 = tl.where(xmask, tmp271, 0)
tmp274 = tl.sum(tmp273, 1)[:, None]
tmp276 = tmp275 - tmp58
tmp277 = tl_math.exp(tmp276)
tmp278 = tmp277 / tmp61
tmp279 = tmp50 * tmp278
tmp280 = tl.broadcast_to(tmp279, [XBLOCK, RBLOCK])
tmp282 = tl.where(xmask, tmp280, 0)
tmp283 = tl.sum(tmp282, 1)[:, None]
tmp285 = tmp284 - tmp58
tmp286 = tl_math.exp(tmp285)
tmp287 = tmp286 / tmp61
tmp288 = tmp52 * tmp287
tmp289 = tl.broadcast_to(tmp288, [XBLOCK, RBLOCK])
tmp291 = tl.where(xmask, tmp289, 0)
tmp292 = tl.sum(tmp291, 1)[:, None]
tmp294 = tmp293 - tmp58
tmp295 = tl_math.exp(tmp294)
tmp296 = tmp295 / tmp61
tmp297 = tmp54 * tmp296
tmp298 = tl.broadcast_to(tmp297, [XBLOCK, RBLOCK])
tmp300 = tl.where(xmask, tmp298, 0)
tmp301 = tl.sum(tmp300, 1)[:, None]
tmp303 = tmp302 - tmp58
tmp304 = tl_math.exp(tmp303)
tmp305 = tmp304 / tmp61
tmp306 = tmp56 * tmp305
tmp307 = tl.broadcast_to(tmp306, [XBLOCK, RBLOCK])
tmp309 = tl.where(xmask, tmp307, 0)
tmp310 = tl.sum(tmp309, 1)[:, None]
tl.store(out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.store(out_ptr1 + (r2 + 16 * x3), tmp4, xmask)
tl.store(out_ptr2 + (r2 + 16 * x3), tmp6, xmask)
tl.store(out_ptr3 + (r2 + 16 * x3), tmp8, xmask)
tl.store(out_ptr4 + (r2 + 16 * x3), tmp10, xmask)
tl.store(out_ptr5 + (r2 + 16 * x3), tmp12, xmask)
tl.store(out_ptr6 + (r2 + 16 * x3), tmp14, xmask)
tl.store(out_ptr7 + (r2 + 16 * x3), tmp16, xmask)
tl.store(out_ptr8 + (r2 + 16 * x3), tmp18, xmask)
tl.store(out_ptr9 + (r2 + 16 * x3), tmp20, xmask)
tl.store(out_ptr10 + (r2 + 16 * x3), tmp22, xmask)
tl.store(out_ptr11 + (r2 + 16 * x3), tmp24, xmask)
tl.store(out_ptr12 + (r2 + 16 * x3), tmp26, xmask)
tl.store(out_ptr13 + (r2 + 16 * x3), tmp28, xmask)
tl.store(out_ptr14 + (r2 + 16 * x3), tmp30, xmask)
tl.store(out_ptr15 + (r2 + 16 * x3), tmp32, xmask)
tl.store(out_ptr16 + (r2 + 16 * x3), tmp34, xmask)
tl.store(out_ptr17 + (r2 + 16 * x3), tmp36, xmask)
tl.store(out_ptr18 + (r2 + 16 * x3), tmp38, xmask)
tl.store(out_ptr19 + (r2 + 16 * x3), tmp40, xmask)
tl.store(out_ptr20 + (r2 + 16 * x3), tmp42, xmask)
tl.store(out_ptr21 + (r2 + 16 * x3), tmp44, xmask)
tl.store(out_ptr22 + (r2 + 16 * x3), tmp46, xmask)
tl.store(out_ptr23 + (r2 + 16 * x3), tmp48, xmask)
tl.store(out_ptr24 + (r2 + 16 * x3), tmp50, xmask)
tl.store(out_ptr25 + (r2 + 16 * x3), tmp52, xmask)
tl.store(out_ptr26 + (r2 + 16 * x3), tmp54, xmask)
tl.store(out_ptr27 + (r2 + 16 * x3), tmp56, xmask)
tl.store(out_ptr28 + x3, tmp67, xmask)
tl.store(out_ptr29 + x3, tmp76, xmask)
tl.store(out_ptr30 + x3, tmp85, xmask)
tl.store(out_ptr31 + x3, tmp94, xmask)
tl.store(out_ptr32 + x3, tmp103, xmask)
tl.store(out_ptr33 + x3, tmp112, xmask)
tl.store(out_ptr34 + x3, tmp121, xmask)
tl.store(out_ptr35 + x3, tmp130, xmask)
tl.store(out_ptr36 + x3, tmp139, xmask)
tl.store(out_ptr37 + x3, tmp148, xmask)
tl.store(out_ptr38 + x3, tmp157, xmask)
tl.store(out_ptr39 + x3, tmp166, xmask)
tl.store(out_ptr40 + x3, tmp175, xmask)
tl.store(out_ptr41 + x3, tmp184, xmask)
tl.store(out_ptr42 + x3, tmp193, xmask)
tl.store(out_ptr43 + x3, tmp202, xmask)
tl.store(out_ptr44 + x3, tmp211, xmask)
tl.store(out_ptr45 + x3, tmp220, xmask)
tl.store(out_ptr46 + x3, tmp229, xmask)
tl.store(out_ptr47 + x3, tmp238, xmask)
tl.store(out_ptr48 + x3, tmp247, xmask)
tl.store(out_ptr49 + x3, tmp256, xmask)
tl.store(out_ptr50 + x3, tmp265, xmask)
tl.store(out_ptr51 + x3, tmp274, xmask)
tl.store(out_ptr52 + x3, tmp283, xmask)
tl.store(out_ptr53 + x3, tmp292, xmask)
tl.store(out_ptr54 + x3, tmp301, xmask)
tl.store(out_ptr55 + x3, tmp310, xmask)
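# triton_per_fused_mul_sub_sum_2 is structurally identical and covers the
# next 28 clusters (29-56); its centroid offsets (116 + x0, ...) and logit
# offsets (464 + ...) continue where the previous kernel stopped.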
@triton.jit
def triton_per_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5,
out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12,
out_ptr13, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (228 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (232 + x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (236 + x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (240 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (244 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (248 + x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (252 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (912 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr3 + (r2 + 16 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp19 = tl.load(in_ptr4 + (r2 + 16 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp26 = tl.load(in_ptr2 + (928 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp35 = tl.load(in_ptr2 + (944 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp44 = tl.load(in_ptr2 + (960 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp53 = tl.load(in_ptr2 + (976 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp62 = tl.load(in_ptr2 + (992 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp71 = tl.load(in_ptr2 + (1008 + r2 + 1024 * x1), xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 - tmp1
tmp4 = tmp0 - tmp3
tmp6 = tmp0 - tmp5
tmp8 = tmp0 - tmp7
tmp10 = tmp0 - tmp9
tmp12 = tmp0 - tmp11
tmp14 = tmp0 - tmp13
tmp17 = tmp15 - tmp16
tmp18 = tl_math.exp(tmp17)
tmp20 = tmp18 / tmp19
tmp21 = tmp2 * tmp20
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.where(xmask, tmp22, 0)
tmp25 = tl.sum(tmp24, 1)[:, None]
tmp27 = tmp26 - tmp16
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp19
tmp30 = tmp4 * tmp29
tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK])
tmp33 = tl.where(xmask, tmp31, 0)
tmp34 = tl.sum(tmp33, 1)[:, None]
tmp36 = tmp35 - tmp16
tmp37 = tl_math.exp(tmp36)
tmp38 = tmp37 / tmp19
tmp39 = tmp6 * tmp38
tmp40 = tl.broadcast_to(tmp39, [XBLOCK, RBLOCK])
tmp42 = tl.where(xmask, tmp40, 0)
tmp43 = tl.sum(tmp42, 1)[:, None]
tmp45 = tmp44 - tmp16
tmp46 = tl_math.exp(tmp45)
tmp47 = tmp46 / tmp19
tmp48 = tmp8 * tmp47
tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
tmp51 = tl.where(xmask, tmp49, 0)
tmp52 = tl.sum(tmp51, 1)[:, None]
tmp54 = tmp53 - tmp16
tmp55 = tl_math.exp(tmp54)
tmp56 = tmp55 / tmp19
tmp57 = tmp10 * tmp56
tmp58 = tl.broadcast_to(tmp57, [XBLOCK, RBLOCK])
tmp60 = tl.where(xmask, tmp58, 0)
tmp61 = tl.sum(tmp60, 1)[:, None]
tmp63 = tmp62 - tmp16
tmp64 = tl_math.exp(tmp63)
tmp65 = tmp64 / tmp19
tmp66 = tmp12 * tmp65
tmp67 = tl.broadcast_to(tmp66, [XBLOCK, RBLOCK])
tmp69 = tl.where(xmask, tmp67, 0)
tmp70 = tl.sum(tmp69, 1)[:, None]
tmp72 = tmp71 - tmp16
tmp73 = tl_math.exp(tmp72)
tmp74 = tmp73 / tmp19
tmp75 = tmp14 * tmp74
tmp76 = tl.broadcast_to(tmp75, [XBLOCK, RBLOCK])
tmp78 = tl.where(xmask, tmp76, 0)
tmp79 = tl.sum(tmp78, 1)[:, None]
tl.store(out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.store(out_ptr1 + (r2 + 16 * x3), tmp4, xmask)
tl.store(out_ptr2 + (r2 + 16 * x3), tmp6, xmask)
tl.store(out_ptr3 + (r2 + 16 * x3), tmp8, xmask)
tl.store(out_ptr4 + (r2 + 16 * x3), tmp10, xmask)
tl.store(out_ptr5 + (r2 + 16 * x3), tmp12, xmask)
tl.store(out_ptr6 + (r2 + 16 * x3), tmp14, xmask)
tl.store(out_ptr7 + x3, tmp25, xmask)
tl.store(out_ptr8 + x3, tmp34, xmask)
tl.store(out_ptr9 + x3, tmp43, xmask)
tl.store(out_ptr10 + x3, tmp52, xmask)
tl.store(out_ptr11 + x3, tmp61, xmask)
tl.store(out_ptr12 + x3, tmp70, xmask)
tl.store(out_ptr13 + x3, tmp79, xmask)
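# triton_per_fused_mul_sub_sum_3 handles the remaining 7 clusters (57-63):
# centroid offsets start at 228 (= 57 * 4) and logit offsets at 912 (= 57 * 16).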
@triton.jit
def triton_poi_fused_copy_zeros_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10,
in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17,
in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24,
in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31,
in_ptr32, in_ptr33, in_ptr34, in_ptr35, in_ptr36, in_ptr37, in_ptr38,
in_ptr39, in_ptr40, in_ptr41, in_ptr42, in_ptr43, in_ptr44, in_ptr45,
in_ptr46, in_ptr47, in_ptr48, in_ptr49, in_ptr50, in_ptr51, in_ptr52,
in_ptr53, in_ptr54, in_ptr55, in_ptr56, in_ptr57, in_ptr58, in_ptr59,
in_ptr60, in_ptr61, in_ptr62, in_ptr63, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 64
x0 = xindex % 4
x2 = xindex // 256
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 5, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp5 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tl.full([1], 3, tl.int64)
tmp8 = tmp0 >= tmp7
tmp9 = tmp0 < tmp1
tmp10 = tmp8 & tmp9
tmp11 = tl.load(in_ptr1 + (x0 + 4 * x2), tmp10 & xmask, eviction_policy
='evict_last', other=0.0)
tmp12 = tl.full([1], 2, tl.int64)
tmp13 = tmp0 >= tmp12
tmp14 = tmp0 < tmp7
tmp15 = tmp13 & tmp14
tmp16 = tl.load(in_ptr2 + (x0 + 4 * x2), tmp15 & xmask, eviction_policy
='evict_last', other=0.0)
tmp17 = tl.full([1], 1, tl.int64)
tmp18 = tmp0 >= tmp17
tmp19 = tmp0 < tmp12
tmp20 = tmp18 & tmp19
tmp21 = tl.load(in_ptr3 + (x0 + 4 * x2), tmp20 & xmask, eviction_policy
='evict_last', other=0.0)
tmp22 = tmp0 < tmp17
tmp23 = tl.load(in_ptr4 + (x0 + 4 * x2), tmp22 & xmask, eviction_policy
='evict_last', other=0.0)
tmp24 = 0.0
tmp25 = tl.where(tmp22, tmp23, tmp24)
tmp26 = tl.where(tmp20, tmp21, tmp25)
tmp27 = tl.where(tmp15, tmp16, tmp26)
tmp28 = tl.where(tmp10, tmp11, tmp27)
tmp29 = tl.where(tmp5, tmp6, tmp28)
tmp30 = tl.full([1], 8, tl.int64)
tmp31 = tmp0 >= tmp30
tmp32 = tl.full([1], 9, tl.int64)
tmp33 = tmp0 < tmp32
tmp34 = tmp31 & tmp33
tmp35 = tl.load(in_ptr5 + (x0 + 4 * x2), tmp34 & xmask, eviction_policy
='evict_last', other=0.0)
tmp36 = tl.full([1], 7, tl.int64)
tmp37 = tmp0 >= tmp36
tmp38 = tmp0 < tmp30
tmp39 = tmp37 & tmp38
tmp40 = tl.load(in_ptr6 + (x0 + 4 * x2), tmp39 & xmask, eviction_policy
='evict_last', other=0.0)
tmp41 = tl.full([1], 6, tl.int64)
tmp42 = tmp0 >= tmp41
tmp43 = tmp0 < tmp36
tmp44 = tmp42 & tmp43
tmp45 = tl.load(in_ptr7 + (x0 + 4 * x2), tmp44 & xmask, eviction_policy
='evict_last', other=0.0)
tmp46 = tmp0 >= tmp3
tmp47 = tmp0 < tmp41
tmp48 = tmp46 & tmp47
tmp49 = tl.load(in_ptr8 + (x0 + 4 * x2), tmp48 & xmask, eviction_policy
='evict_last', other=0.0)
tmp50 = tl.where(tmp48, tmp49, tmp29)
tmp51 = tl.where(tmp44, tmp45, tmp50)
tmp52 = tl.where(tmp39, tmp40, tmp51)
tmp53 = tl.where(tmp34, tmp35, tmp52)
tmp54 = tl.full([1], 12, tl.int64)
tmp55 = tmp0 >= tmp54
tmp56 = tl.full([1], 13, tl.int64)
tmp57 = tmp0 < tmp56
tmp58 = tmp55 & tmp57
tmp59 = tl.load(in_ptr9 + (x0 + 4 * x2), tmp58 & xmask, eviction_policy
='evict_last', other=0.0)
tmp60 = tl.full([1], 11, tl.int64)
tmp61 = tmp0 >= tmp60
tmp62 = tmp0 < tmp54
tmp63 = tmp61 & tmp62
tmp64 = tl.load(in_ptr10 + (x0 + 4 * x2), tmp63 & xmask,
eviction_policy='evict_last', other=0.0)
tmp65 = tl.full([1], 10, tl.int64)
tmp66 = tmp0 >= tmp65
tmp67 = tmp0 < tmp60
tmp68 = tmp66 & tmp67
tmp69 = tl.load(in_ptr11 + (x0 + 4 * x2), tmp68 & xmask,
eviction_policy='evict_last', other=0.0)
tmp70 = tmp0 >= tmp32
tmp71 = tmp0 < tmp65
tmp72 = tmp70 & tmp71
tmp73 = tl.load(in_ptr12 + (x0 + 4 * x2), tmp72 & xmask,
eviction_policy='evict_last', other=0.0)
tmp74 = tl.where(tmp72, tmp73, tmp53)
tmp75 = tl.where(tmp68, tmp69, tmp74)
tmp76 = tl.where(tmp63, tmp64, tmp75)
tmp77 = tl.where(tmp58, tmp59, tmp76)
tmp78 = tl.full([1], 16, tl.int64)
tmp79 = tmp0 >= tmp78
tmp80 = tl.full([1], 17, tl.int64)
tmp81 = tmp0 < tmp80
tmp82 = tmp79 & tmp81
tmp83 = tl.load(in_ptr13 + (x0 + 4 * x2), tmp82 & xmask,
eviction_policy='evict_last', other=0.0)
tmp84 = tl.full([1], 15, tl.int64)
tmp85 = tmp0 >= tmp84
tmp86 = tmp0 < tmp78
tmp87 = tmp85 & tmp86
tmp88 = tl.load(in_ptr14 + (x0 + 4 * x2), tmp87 & xmask,
eviction_policy='evict_last', other=0.0)
tmp89 = tl.full([1], 14, tl.int64)
tmp90 = tmp0 >= tmp89
tmp91 = tmp0 < tmp84
tmp92 = tmp90 & tmp91
tmp93 = tl.load(in_ptr15 + (x0 + 4 * x2), tmp92 & xmask,
eviction_policy='evict_last', other=0.0)
tmp94 = tmp0 >= tmp56
tmp95 = tmp0 < tmp89
tmp96 = tmp94 & tmp95
tmp97 = tl.load(in_ptr16 + (x0 + 4 * x2), tmp96 & xmask,
eviction_policy='evict_last', other=0.0)
tmp98 = tl.where(tmp96, tmp97, tmp77)
tmp99 = tl.where(tmp92, tmp93, tmp98)
tmp100 = tl.where(tmp87, tmp88, tmp99)
tmp101 = tl.where(tmp82, tmp83, tmp100)
tmp102 = tl.full([1], 20, tl.int64)
tmp103 = tmp0 >= tmp102
tmp104 = tl.full([1], 21, tl.int64)
tmp105 = tmp0 < tmp104
tmp106 = tmp103 & tmp105
tmp107 = tl.load(in_ptr17 + (x0 + 4 * x2), tmp106 & xmask,
eviction_policy='evict_last', other=0.0)
tmp108 = tl.full([1], 19, tl.int64)
tmp109 = tmp0 >= tmp108
tmp110 = tmp0 < tmp102
tmp111 = tmp109 & tmp110
tmp112 = tl.load(in_ptr18 + (x0 + 4 * x2), tmp111 & xmask,
eviction_policy='evict_last', other=0.0)
tmp113 = tl.full([1], 18, tl.int64)
tmp114 = tmp0 >= tmp113
tmp115 = tmp0 < tmp108
tmp116 = tmp114 & tmp115
tmp117 = tl.load(in_ptr19 + (x0 + 4 * x2), tmp116 & xmask,
eviction_policy='evict_last', other=0.0)
tmp118 = tmp0 >= tmp80
tmp119 = tmp0 < tmp113
tmp120 = tmp118 & tmp119
tmp121 = tl.load(in_ptr20 + (x0 + 4 * x2), tmp120 & xmask,
eviction_policy='evict_last', other=0.0)
tmp122 = tl.where(tmp120, tmp121, tmp101)
tmp123 = tl.where(tmp116, tmp117, tmp122)
tmp124 = tl.where(tmp111, tmp112, tmp123)
tmp125 = tl.where(tmp106, tmp107, tmp124)
tmp126 = tl.full([1], 24, tl.int64)
tmp127 = tmp0 >= tmp126
tmp128 = tl.full([1], 25, tl.int64)
tmp129 = tmp0 < tmp128
tmp130 = tmp127 & tmp129
tmp131 = tl.load(in_ptr21 + (x0 + 4 * x2), tmp130 & xmask,
eviction_policy='evict_last', other=0.0)
tmp132 = tl.full([1], 23, tl.int64)
tmp133 = tmp0 >= tmp132
tmp134 = tmp0 < tmp126
tmp135 = tmp133 & tmp134
tmp136 = tl.load(in_ptr22 + (x0 + 4 * x2), tmp135 & xmask,
eviction_policy='evict_last', other=0.0)
tmp137 = tl.full([1], 22, tl.int64)
tmp138 = tmp0 >= tmp137
tmp139 = tmp0 < tmp132
tmp140 = tmp138 & tmp139
tmp141 = tl.load(in_ptr23 + (x0 + 4 * x2), tmp140 & xmask,
eviction_policy='evict_last', other=0.0)
tmp142 = tmp0 >= tmp104
tmp143 = tmp0 < tmp137
tmp144 = tmp142 & tmp143
tmp145 = tl.load(in_ptr24 + (x0 + 4 * x2), tmp144 & xmask,
eviction_policy='evict_last', other=0.0)
tmp146 = tl.where(tmp144, tmp145, tmp125)
tmp147 = tl.where(tmp140, tmp141, tmp146)
tmp148 = tl.where(tmp135, tmp136, tmp147)
tmp149 = tl.where(tmp130, tmp131, tmp148)
tmp150 = tl.full([1], 28, tl.int64)
tmp151 = tmp0 >= tmp150
tmp152 = tl.full([1], 29, tl.int64)
tmp153 = tmp0 < tmp152
tmp154 = tmp151 & tmp153
tmp155 = tl.load(in_ptr25 + (x0 + 4 * x2), tmp154 & xmask,
eviction_policy='evict_last', other=0.0)
tmp156 = tl.full([1], 27, tl.int64)
tmp157 = tmp0 >= tmp156
tmp158 = tmp0 < tmp150
tmp159 = tmp157 & tmp158
tmp160 = tl.load(in_ptr26 + (x0 + 4 * x2), tmp159 & xmask,
eviction_policy='evict_last', other=0.0)
tmp161 = tl.full([1], 26, tl.int64)
tmp162 = tmp0 >= tmp161
tmp163 = tmp0 < tmp156
tmp164 = tmp162 & tmp163
tmp165 = tl.load(in_ptr27 + (x0 + 4 * x2), tmp164 & xmask,
eviction_policy='evict_last', other=0.0)
tmp166 = tmp0 >= tmp128
tmp167 = tmp0 < tmp161
tmp168 = tmp166 & tmp167
tmp169 = tl.load(in_ptr28 + (x0 + 4 * x2), tmp168 & xmask,
eviction_policy='evict_last', other=0.0)
tmp170 = tl.where(tmp168, tmp169, tmp149)
tmp171 = tl.where(tmp164, tmp165, tmp170)
tmp172 = tl.where(tmp159, tmp160, tmp171)
tmp173 = tl.where(tmp154, tmp155, tmp172)
tmp174 = tl.full([1], 32, tl.int64)
tmp175 = tmp0 >= tmp174
tmp176 = tl.full([1], 33, tl.int64)
tmp177 = tmp0 < tmp176
tmp178 = tmp175 & tmp177
tmp179 = tl.load(in_ptr29 + (x0 + 4 * x2), tmp178 & xmask,
eviction_policy='evict_last', other=0.0)
tmp180 = tl.full([1], 31, tl.int64)
tmp181 = tmp0 >= tmp180
tmp182 = tmp0 < tmp174
tmp183 = tmp181 & tmp182
tmp184 = tl.load(in_ptr30 + (x0 + 4 * x2), tmp183 & xmask,
eviction_policy='evict_last', other=0.0)
tmp185 = tl.full([1], 30, tl.int64)
tmp186 = tmp0 >= tmp185
tmp187 = tmp0 < tmp180
tmp188 = tmp186 & tmp187
tmp189 = tl.load(in_ptr31 + (x0 + 4 * x2), tmp188 & xmask,
eviction_policy='evict_last', other=0.0)
tmp190 = tmp0 >= tmp152
tmp191 = tmp0 < tmp185
tmp192 = tmp190 & tmp191
tmp193 = tl.load(in_ptr32 + (x0 + 4 * x2), tmp192 & xmask,
eviction_policy='evict_last', other=0.0)
tmp194 = tl.where(tmp192, tmp193, tmp173)
tmp195 = tl.where(tmp188, tmp189, tmp194)
tmp196 = tl.where(tmp183, tmp184, tmp195)
tmp197 = tl.where(tmp178, tmp179, tmp196)
tmp198 = tl.full([1], 36, tl.int64)
tmp199 = tmp0 >= tmp198
tmp200 = tl.full([1], 37, tl.int64)
tmp201 = tmp0 < tmp200
tmp202 = tmp199 & tmp201
tmp203 = tl.load(in_ptr33 + (x0 + 4 * x2), tmp202 & xmask,
eviction_policy='evict_last', other=0.0)
tmp204 = tl.full([1], 35, tl.int64)
tmp205 = tmp0 >= tmp204
tmp206 = tmp0 < tmp198
tmp207 = tmp205 & tmp206
tmp208 = tl.load(in_ptr34 + (x0 + 4 * x2), tmp207 & xmask,
eviction_policy='evict_last', other=0.0)
tmp209 = tl.full([1], 34, tl.int64)
tmp210 = tmp0 >= tmp209
tmp211 = tmp0 < tmp204
tmp212 = tmp210 & tmp211
tmp213 = tl.load(in_ptr35 + (x0 + 4 * x2), tmp212 & xmask,
eviction_policy='evict_last', other=0.0)
tmp214 = tmp0 >= tmp176
tmp215 = tmp0 < tmp209
tmp216 = tmp214 & tmp215
tmp217 = tl.load(in_ptr36 + (x0 + 4 * x2), tmp216 & xmask,
eviction_policy='evict_last', other=0.0)
tmp218 = tl.where(tmp216, tmp217, tmp197)
tmp219 = tl.where(tmp212, tmp213, tmp218)
tmp220 = tl.where(tmp207, tmp208, tmp219)
tmp221 = tl.where(tmp202, tmp203, tmp220)
tmp222 = tl.full([1], 40, tl.int64)
tmp223 = tmp0 >= tmp222
tmp224 = tl.full([1], 41, tl.int64)
tmp225 = tmp0 < tmp224
tmp226 = tmp223 & tmp225
tmp227 = tl.load(in_ptr37 + (x0 + 4 * x2), tmp226 & xmask,
eviction_policy='evict_last', other=0.0)
tmp228 = tl.full([1], 39, tl.int64)
tmp229 = tmp0 >= tmp228
tmp230 = tmp0 < tmp222
tmp231 = tmp229 & tmp230
tmp232 = tl.load(in_ptr38 + (x0 + 4 * x2), tmp231 & xmask,
eviction_policy='evict_last', other=0.0)
tmp233 = tl.full([1], 38, tl.int64)
tmp234 = tmp0 >= tmp233
tmp235 = tmp0 < tmp228
tmp236 = tmp234 & tmp235
tmp237 = tl.load(in_ptr39 + (x0 + 4 * x2), tmp236 & xmask,
eviction_policy='evict_last', other=0.0)
tmp238 = tmp0 >= tmp200
tmp239 = tmp0 < tmp233
tmp240 = tmp238 & tmp239
tmp241 = tl.load(in_ptr40 + (x0 + 4 * x2), tmp240 & xmask,
eviction_policy='evict_last', other=0.0)
tmp242 = tl.where(tmp240, tmp241, tmp221)
tmp243 = tl.where(tmp236, tmp237, tmp242)
tmp244 = tl.where(tmp231, tmp232, tmp243)
tmp245 = tl.where(tmp226, tmp227, tmp244)
tmp246 = tl.full([1], 44, tl.int64)
tmp247 = tmp0 >= tmp246
tmp248 = tl.full([1], 45, tl.int64)
tmp249 = tmp0 < tmp248
tmp250 = tmp247 & tmp249
tmp251 = tl.load(in_ptr41 + (x0 + 4 * x2), tmp250 & xmask,
eviction_policy='evict_last', other=0.0)
tmp252 = tl.full([1], 43, tl.int64)
tmp253 = tmp0 >= tmp252
tmp254 = tmp0 < tmp246
tmp255 = tmp253 & tmp254
tmp256 = tl.load(in_ptr42 + (x0 + 4 * x2), tmp255 & xmask,
eviction_policy='evict_last', other=0.0)
tmp257 = tl.full([1], 42, tl.int64)
tmp258 = tmp0 >= tmp257
tmp259 = tmp0 < tmp252
tmp260 = tmp258 & tmp259
tmp261 = tl.load(in_ptr43 + (x0 + 4 * x2), tmp260 & xmask,
eviction_policy='evict_last', other=0.0)
tmp262 = tmp0 >= tmp224
tmp263 = tmp0 < tmp257
tmp264 = tmp262 & tmp263
tmp265 = tl.load(in_ptr44 + (x0 + 4 * x2), tmp264 & xmask,
eviction_policy='evict_last', other=0.0)
tmp266 = tl.where(tmp264, tmp265, tmp245)
tmp267 = tl.where(tmp260, tmp261, tmp266)
tmp268 = tl.where(tmp255, tmp256, tmp267)
tmp269 = tl.where(tmp250, tmp251, tmp268)
tmp270 = tl.full([1], 48, tl.int64)
tmp271 = tmp0 >= tmp270
tmp272 = tl.full([1], 49, tl.int64)
tmp273 = tmp0 < tmp272
tmp274 = tmp271 & tmp273
tmp275 = tl.load(in_ptr45 + (x0 + 4 * x2), tmp274 & xmask,
eviction_policy='evict_last', other=0.0)
tmp276 = tl.full([1], 47, tl.int64)
tmp277 = tmp0 >= tmp276
tmp278 = tmp0 < tmp270
tmp279 = tmp277 & tmp278
tmp280 = tl.load(in_ptr46 + (x0 + 4 * x2), tmp279 & xmask,
eviction_policy='evict_last', other=0.0)
tmp281 = tl.full([1], 46, tl.int64)
tmp282 = tmp0 >= tmp281
tmp283 = tmp0 < tmp276
tmp284 = tmp282 & tmp283
tmp285 = tl.load(in_ptr47 + (x0 + 4 * x2), tmp284 & xmask,
eviction_policy='evict_last', other=0.0)
tmp286 = tmp0 >= tmp248
tmp287 = tmp0 < tmp281
tmp288 = tmp286 & tmp287
tmp289 = tl.load(in_ptr48 + (x0 + 4 * x2), tmp288 & xmask,
eviction_policy='evict_last', other=0.0)
tmp290 = tl.where(tmp288, tmp289, tmp269)
tmp291 = tl.where(tmp284, tmp285, tmp290)
tmp292 = tl.where(tmp279, tmp280, tmp291)
tmp293 = tl.where(tmp274, tmp275, tmp292)
tmp294 = tl.full([1], 52, tl.int64)
tmp295 = tmp0 >= tmp294
tmp296 = tl.full([1], 53, tl.int64)
tmp297 = tmp0 < tmp296
tmp298 = tmp295 & tmp297
tmp299 = tl.load(in_ptr49 + (x0 + 4 * x2), tmp298 & xmask,
eviction_policy='evict_last', other=0.0)
tmp300 = tl.full([1], 51, tl.int64)
tmp301 = tmp0 >= tmp300
tmp302 = tmp0 < tmp294
tmp303 = tmp301 & tmp302
tmp304 = tl.load(in_ptr50 + (x0 + 4 * x2), tmp303 & xmask,
eviction_policy='evict_last', other=0.0)
tmp305 = tl.full([1], 50, tl.int64)
tmp306 = tmp0 >= tmp305
tmp307 = tmp0 < tmp300
tmp308 = tmp306 & tmp307
tmp309 = tl.load(in_ptr51 + (x0 + 4 * x2), tmp308 & xmask,
eviction_policy='evict_last', other=0.0)
tmp310 = tmp0 >= tmp272
tmp311 = tmp0 < tmp305
tmp312 = tmp310 & tmp311
tmp313 = tl.load(in_ptr52 + (x0 + 4 * x2), tmp312 & xmask,
eviction_policy='evict_last', other=0.0)
tmp314 = tl.where(tmp312, tmp313, tmp293)
tmp315 = tl.where(tmp308, tmp309, tmp314)
tmp316 = tl.where(tmp303, tmp304, tmp315)
tmp317 = tl.where(tmp298, tmp299, tmp316)
tmp318 = tl.full([1], 56, tl.int64)
tmp319 = tmp0 >= tmp318
tmp320 = tl.full([1], 57, tl.int64)
tmp321 = tmp0 < tmp320
tmp322 = tmp319 & tmp321
tmp323 = tl.load(in_ptr53 + (x0 + 4 * x2), tmp322 & xmask,
eviction_policy='evict_last', other=0.0)
tmp324 = tl.full([1], 55, tl.int64)
tmp325 = tmp0 >= tmp324
tmp326 = tmp0 < tmp318
tmp327 = tmp325 & tmp326
tmp328 = tl.load(in_ptr54 + (x0 + 4 * x2), tmp327 & xmask,
eviction_policy='evict_last', other=0.0)
tmp329 = tl.full([1], 54, tl.int64)
tmp330 = tmp0 >= tmp329
tmp331 = tmp0 < tmp324
tmp332 = tmp330 & tmp331
tmp333 = tl.load(in_ptr55 + (x0 + 4 * x2), tmp332 & xmask,
eviction_policy='evict_last', other=0.0)
tmp334 = tmp0 >= tmp296
tmp335 = tmp0 < tmp329
tmp336 = tmp334 & tmp335
tmp337 = tl.load(in_ptr56 + (x0 + 4 * x2), tmp336 & xmask,
eviction_policy='evict_last', other=0.0)
tmp338 = tl.where(tmp336, tmp337, tmp317)
tmp339 = tl.where(tmp332, tmp333, tmp338)
tmp340 = tl.where(tmp327, tmp328, tmp339)
tmp341 = tl.where(tmp322, tmp323, tmp340)
tmp342 = tl.full([1], 60, tl.int64)
tmp343 = tmp0 >= tmp342
tmp344 = tl.full([1], 61, tl.int64)
tmp345 = tmp0 < tmp344
tmp346 = tmp343 & tmp345
tmp347 = tl.load(in_ptr57 + (x0 + 4 * x2), tmp346 & xmask,
eviction_policy='evict_last', other=0.0)
tmp348 = tl.full([1], 59, tl.int64)
tmp349 = tmp0 >= tmp348
tmp350 = tmp0 < tmp342
tmp351 = tmp349 & tmp350
tmp352 = tl.load(in_ptr58 + (x0 + 4 * x2), tmp351 & xmask,
eviction_policy='evict_last', other=0.0)
tmp353 = tl.full([1], 58, tl.int64)
tmp354 = tmp0 >= tmp353
tmp355 = tmp0 < tmp348
tmp356 = tmp354 & tmp355
tmp357 = tl.load(in_ptr59 + (x0 + 4 * x2), tmp356 & xmask,
eviction_policy='evict_last', other=0.0)
tmp358 = tmp0 >= tmp320
tmp359 = tmp0 < tmp353
tmp360 = tmp358 & tmp359
tmp361 = tl.load(in_ptr60 + (x0 + 4 * x2), tmp360 & xmask,
eviction_policy='evict_last', other=0.0)
tmp362 = tl.where(tmp360, tmp361, tmp341)
tmp363 = tl.where(tmp356, tmp357, tmp362)
tmp364 = tl.where(tmp351, tmp352, tmp363)
tmp365 = tl.where(tmp346, tmp347, tmp364)
tmp366 = tl.full([1], 63, tl.int64)
tmp367 = tmp0 >= tmp366
tmp368 = tl.load(in_ptr61 + (x0 + 4 * x2), tmp367 & xmask,
eviction_policy='evict_last', other=0.0)
tmp369 = tl.full([1], 62, tl.int64)
tmp370 = tmp0 >= tmp369
tmp371 = tmp0 < tmp366
tmp372 = tmp370 & tmp371
tmp373 = tl.load(in_ptr62 + (x0 + 4 * x2), tmp372 & xmask,
eviction_policy='evict_last', other=0.0)
tmp374 = tmp0 >= tmp344
tmp375 = tmp0 < tmp369
tmp376 = tmp374 & tmp375
tmp377 = tl.load(in_ptr63 + (x0 + 4 * x2), tmp376 & xmask,
eviction_policy='evict_last', other=0.0)
tmp378 = tl.where(tmp376, tmp377, tmp365)
tmp379 = tl.where(tmp372, tmp373, tmp378)
tmp380 = tl.where(tmp367, tmp368, tmp379)
tl.store(in_out_ptr0 + x3, tmp380, xmask)
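# Reading the cascade above: each (tmp0 >= k) & (tmp0 < k + 1) pair tests
# whether the cluster index (tmp0) falls into slot k, and the chain of
# tl.where() calls selects the matching per-cluster partial sum
# (in_ptr0 .. in_ptr63) into one output buffer. This appears to be the fused
# counterpart of the eager per-cluster writes vlad[:, c:c + 1, :] = residual.sum(dim=-1).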
@triton.jit
def triton_red_fused_div_linalg_vector_norm_5(in_out_ptr0, in_ptr0,
out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.
constexpr):
xnumel = 4
rnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
_tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
r2 = rindex // 4
tmp0 = tl.load(in_ptr0 + (r3 + 256 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp1 = tl.load(in_ptr0 + (4 * r2 + 256 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp3 = tl.load(in_ptr0 + (1 + 4 * r2 + 256 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (2 + 4 * r2 + 256 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = tl.load(in_ptr0 + (3 + 4 * r2 + 256 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = _tmp18 + tmp17
_tmp18 = tl.where(rmask & xmask, tmp19, _tmp18)
tl.store(out_ptr0 + (r3 + 256 * x0), tmp15, rmask & xmask)
tmp18 = tl.sum(_tmp18, 1)[:, None]
tmp20 = libdevice.sqrt(tmp18)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp21 = tl.load(out_ptr0 + (r3 + 256 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp22 = 1e-12
tmp23 = triton_helpers.maximum(tmp20, tmp22)
tmp24 = tmp21 / tmp23
tl.store(out_ptr1 + (r3 + 256 * x0), tmp24, rmask & xmask)
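# A minimal eager-mode sketch of what the reduction kernel above computes,
# inferred from the fused ops (illustrative only; this helper is not part of
# the generated module): per-row L2 normalization with the same 1e-12 clamp,
# then a second L2 normalization of the flattened descriptor.
def _two_stage_l2_normalize_reference(vlad):
    import torch.nn.functional as F
    vlad = F.normalize(vlad, p=2, dim=2)  # matches the inner sqrt/clamp/div
    return F.normalize(vlad.reshape(vlad.size(0), -1), p=2, dim=1)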
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (64, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (64, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 4, 4), (1024, 16, 4, 1))
buf1 = empty_strided_cuda((4, 1, 16), (16, 16, 1), torch.float32)
buf2 = empty_strided_cuda((4, 1, 16), (16, 16, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__softmax_0[grid(64)](buf0, buf1, buf2, 64, 64,
XBLOCK=32, num_warps=8, num_stages=1)
buf4 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf6 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf8 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf10 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf13 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf15 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf17 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf19 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf22 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf24 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf26 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf28 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf31 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf33 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf35 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf37 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf40 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf42 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf44 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf46 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf49 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf51 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf53 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf55 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf58 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf60 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf62 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf64 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf3 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf5 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf7 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf9 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf11 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf14 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf16 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf18 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf20 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf23 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf25 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf27 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf29 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf32 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf34 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf36 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf38 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf41 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf43 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf45 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf47 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf50 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf52 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf54 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf56 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf59 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf61 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf63 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf65 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
triton_per_fused_mul_sub_sum_1[grid(16)](primals_1, primals_3, buf0,
buf1, buf2, buf4, buf6, buf8, buf10, buf13, buf15, buf17, buf19,
buf22, buf24, buf26, buf28, buf31, buf33, buf35, buf37, buf40,
buf42, buf44, buf46, buf49, buf51, buf53, buf55, buf58, buf60,
buf62, buf64, buf3, buf5, buf7, buf9, buf11, buf14, buf16,
buf18, buf20, buf23, buf25, buf27, buf29, buf32, buf34, buf36,
buf38, buf41, buf43, buf45, buf47, buf50, buf52, buf54, buf56,
buf59, buf61, buf63, buf65, 16, 16, XBLOCK=1, num_warps=2,
num_stages=1)
buf67 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf69 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf71 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf73 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf76 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf78 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf80 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf82 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf85 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf87 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf89 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf91 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf94 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf96 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf98 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf100 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf103 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf105 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf107 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf109 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf112 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf114 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf116 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf118 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf121 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf123 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf125 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf127 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf68 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf70 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf72 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf74 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf77 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf79 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf81 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf83 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf86 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf88 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf90 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf92 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf95 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf97 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf99 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf101 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf104 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf106 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf108 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf110 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf113 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf115 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf117 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf119 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf122 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf124 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf126 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf128 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
triton_per_fused_mul_sub_sum_2[grid(16)](primals_1, primals_3, buf0,
buf1, buf2, buf67, buf69, buf71, buf73, buf76, buf78, buf80,
buf82, buf85, buf87, buf89, buf91, buf94, buf96, buf98, buf100,
buf103, buf105, buf107, buf109, buf112, buf114, buf116, buf118,
buf121, buf123, buf125, buf127, buf68, buf70, buf72, buf74,
buf77, buf79, buf81, buf83, buf86, buf88, buf90, buf92, buf95,
buf97, buf99, buf101, buf104, buf106, buf108, buf110, buf113,
buf115, buf117, buf119, buf122, buf124, buf126, buf128, 16, 16,
XBLOCK=1, num_warps=2, num_stages=1)
buf130 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf132 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf134 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf136 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf139 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf141 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf143 = empty_strided_cuda((4, 1, 4, 16), (64, 256, 16, 1), torch.
float32)
buf131 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf133 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf135 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf137 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf140 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf142 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf144 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
triton_per_fused_mul_sub_sum_3[grid(16)](primals_1, primals_3, buf0,
buf1, buf2, buf130, buf132, buf134, buf136, buf139, buf141,
buf143, buf131, buf133, buf135, buf137, buf140, buf142, buf144,
16, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf12 = empty_strided_cuda((4, 64, 4), (256, 4, 1), torch.float32)
buf21 = buf12
del buf12
buf30 = buf21
del buf21
buf39 = buf30
del buf30
buf48 = buf39
del buf39
buf57 = buf48
del buf48
buf66 = buf57
del buf57
buf75 = buf66
del buf66
buf84 = buf75
del buf75
buf93 = buf84
del buf84
buf102 = buf93
del buf93
buf111 = buf102
del buf102
buf120 = buf111
del buf111
buf129 = buf120
del buf120
buf138 = buf129
del buf129
buf145 = buf138
del buf138
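        # The aliasing chain above (buf12 -> buf21 -> ... -> buf145) is, as far
        # as the generated code suggests, Inductor reusing one (4, 64, 4)
        # scratch buffer in place; each alias/del pair stands for one group of
        # per-cluster copies that the kernel launched below writes at once.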
triton_poi_fused_copy_zeros_4[grid(1024)](buf145, buf11, buf9, buf7,
buf5, buf3, buf20, buf18, buf16, buf14, buf29, buf27, buf25,
buf23, buf38, buf36, buf34, buf32, buf47, buf45, buf43, buf41,
buf56, buf54, buf52, buf50, buf65, buf63, buf61, buf59, buf74,
buf72, buf70, buf68, buf83, buf81, buf79, buf77, buf92, buf90,
buf88, buf86, buf101, buf99, buf97, buf95, buf110, buf108,
buf106, buf104, buf119, buf117, buf115, buf113, buf128, buf126,
buf124, buf122, buf137, buf135, buf133, buf131, buf144, buf142,
buf140, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del buf101
del buf104
del buf106
del buf108
del buf11
del buf110
del buf113
del buf115
del buf117
del buf119
del buf122
del buf124
del buf126
del buf128
del buf131
del buf133
del buf135
del buf137
del buf14
del buf140
del buf142
del buf144
del buf16
del buf18
del buf20
del buf23
del buf25
del buf27
del buf29
del buf3
del buf32
del buf34
del buf36
del buf38
del buf41
del buf43
del buf45
del buf47
del buf5
del buf50
del buf52
del buf54
del buf56
del buf59
del buf61
del buf63
del buf65
del buf68
del buf7
del buf70
del buf72
del buf74
del buf77
del buf79
del buf81
del buf83
del buf86
del buf88
del buf9
del buf90
del buf92
del buf95
del buf97
del buf99
buf146 = empty_strided_cuda((4, 64, 4), (256, 4, 1), torch.float32)
buf147 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf148 = reinterpret_tensor(buf147, (4, 1), (1, 1), 0)
del buf147
buf149 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
triton_red_fused_div_linalg_vector_norm_5[grid(4)](buf148, buf145,
buf146, buf149, 4, 256, XBLOCK=1, RBLOCK=256, num_warps=2,
num_stages=1)
del buf146
return (buf149, primals_1, primals_2, buf0, buf1, buf2,
reinterpret_tensor(primals_3, (1, 4), (4, 1), 0), buf4, buf6, buf8,
buf10, buf13, buf15, buf17, buf19, buf22, buf24, buf26, buf28,
buf31, buf33, buf35, buf37, buf40, buf42, buf44, buf46, buf49,
buf51, buf53, buf55, buf58, buf60, buf62, buf64, buf67, buf69,
buf71, buf73, buf76, buf78, buf80, buf82, buf85, buf87, buf89,
buf91, buf94, buf96, buf98, buf100, buf103, buf105, buf107, buf109,
buf112, buf114, buf116, buf118, buf121, buf123, buf125, buf127,
buf130, buf132, buf134, buf136, buf139, buf141, buf143, buf145, buf148)
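# Of the returned tuple, the first element (buf149, shape (4, 256)) is the
# final L2-normalized VLAD descriptor that forward() exposes; the remaining
# tensors are intermediates retained for the backward pass.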
class NetVLADNew(nn.Module):
"""NetVLAD layer implementation"""
def __init__(self, dim, num_clusters=64):
"""
Args:
dim : int
Dimension of descriptors
num_clusters : int
The number of clusters
"""
super(NetVLADNew, self).__init__()
self.num_clusters = num_clusters
self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=False
)
self.centroids = nn.Parameter(torch.rand(num_clusters, dim))
def init_params(self, clsts, traindescs):
clsts_assign = clsts / np.linalg.norm(clsts, axis=1, keepdims=True)
dots = np.dot(clsts_assign, traindescs.T)
dots.sort(0)
dots = dots[::-1, :]
alpha = (-np.log(0.01) / np.mean(dots[0, :] - dots[1, :])).item()
self.centroids = nn.Parameter(torch.from_numpy(clsts))
self.conv.weight = nn.Parameter(torch.from_numpy(alpha *
clsts_assign).unsqueeze(2).unsqueeze(3))
self.conv.bias = None
def forward(self, input_0):
primals_3 = self.centroids
primals_2 = self.conv.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
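# Hedged usage sketch (assumes a CUDA device; the asserts in call() hard-code
# the traced shapes, so this compiled module only accepts dim=4,
# num_clusters=64 and a (4, 4, 4, 4) input):
#
#   model = NetVLADNew(dim=4, num_clusters=64).cuda()
#   desc = model(torch.rand(4, 4, 4, 4, device='cuda'))  # -> (4, 256)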
| lulor/project_vg | NetVLAD | false | 7269 | ["MIT"] | 1 | 27b0c3b3038c5a666dde516a0a265ae8ddf2059f | https://github.com/lulor/project_vg/tree/27b0c3b3038c5a666dde516a0a265ae8ddf2059f | import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
"""NetVLAD layer implementation"""
def __init__(self, dim, num_clusters=64):
"""
Args:
dim : int
Dimension of descriptors
num_clusters : int
The number of clusters
"""
super().__init__()
self.num_clusters = num_clusters
self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=False
)
self.centroids = nn.Parameter(torch.rand(num_clusters, dim))
def init_params(self, clsts, traindescs):
clsts_assign = clsts / np.linalg.norm(clsts, axis=1, keepdims=True)
dots = np.dot(clsts_assign, traindescs.T)
dots.sort(0)
dots = dots[::-1, :]
alpha = (-np.log(0.01) / np.mean(dots[0, :] - dots[1, :])).item()
self.centroids = nn.Parameter(torch.from_numpy(clsts))
self.conv.weight = nn.Parameter(torch.from_numpy(alpha *
clsts_assign).unsqueeze(2).unsqueeze(3))
self.conv.bias = None
def forward(self, x, crm=None):
N, C = x.shape[:2]
soft_assign = self.conv(x).view(N, self.num_clusters, -1)
soft_assign = F.softmax(soft_assign, dim=1)
if crm is not None:
assert crm.shape[0] == N and crm.shape[1] == 1 and crm.shape[2:
] == x.shape[2:]
soft_assign = torch.mul(soft_assign, crm.view(N, 1, -1))
x_flatten = x.view(N, C, -1)
vlad = torch.zeros((N, self.num_clusters, C), dtype=x.dtype, layout
=x.layout, device=x.device)
for c in range(self.num_clusters):
residual = x_flatten.unsqueeze(0).permute(1, 0, 2, 3
) - self.centroids[c:c + 1, :].expand(x_flatten.size(-1), -
1, -1).permute(1, 2, 0).unsqueeze(0)
residual *= soft_assign[:, c:c + 1, :].unsqueeze(2)
vlad[:, c:c + 1, :] = residual.sum(dim=-1)
vlad = F.normalize(vlad, p=2, dim=2)
vlad = vlad.view(N, -1)
vlad = F.normalize(vlad, p=2, dim=1)
return vlad
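# Shape walk-through for the loop above (illustrative, using the traced sizes
# N=4, C=4, H=W=4, num_clusters=64): soft_assign is (N, 64, 16); for each
# cluster c the broadcast residual is (N, 1, C, 16), the per-location weights
# are (N, 1, 1, 16), and the sum over the last axis yields one (N, 1, C) row
# of the VLAD matrix.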
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
DuelingNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/tf/ctfobpckmiv3kkga3a6gzs6unuclcnxpb4xc2h5r3udgxgix4ip5.py
# Topologically Sorted Source Nodes: [h1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# h1 => relu
# Graph fragment:
# %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_2), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/nl/cnlp53tjaaclmkilyizp32cbhjp6ctd3j4psucie664opwp5nivh.py
# Topologically Sorted Source Nodes: [add, output], Original ATen: [aten.add, aten.sub]
# Source node to ATen node mapping:
# add => add
# output => sub
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, %addmm_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %expand_1), kwargs = {})
triton_poi_fused_add_sub_1 = async_compile.triton('triton_poi_fused_add_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (x2), xmask)
tmp6 = tl.load(in_ptr2 + (4*x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp12 = tmp10 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = tmp5 - tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
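# The fused kernel above evaluates the dueling combine
# Q(s, a) = V(s) + A(s, a) - mean_a A(s, a): tmp0/tmp2 rebuild the broadcast
# value head (mm output plus bias), tmp6..tmp12 accumulate the four advantage
# entries of the row, and tmp14 is their mean (the 4.0 reflects the traced
# n_out=4).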
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (1, 4), (4, 1))
assert_size_stride(primals_9, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [h1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_2, 16, grid=grid(16), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [h2], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf3, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [adv], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf3, reinterpret_tensor(primals_8, (4, 1), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, output], Original ATen: [aten.add, aten.sub]
triton_poi_fused_add_sub_1.run(buf5, primals_9, buf4, buf6, 16, grid=grid(16), stream=stream0)
del buf4
del buf5
del primals_9
return (buf6, primals_3, buf1, buf3, primals_8, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class DuelingNet(nn.Module):
def __init__(self, n_in, n_mid, n_out):
super(DuelingNet, self).__init__()
self.fc1 = nn.Linear(n_in, n_mid)
self.fc2 = nn.Linear(n_mid, n_mid)
self.fc3_adv = nn.Linear(n_mid, n_out)
self.fc3_val = nn.Linear(n_mid, 1)
def forward(self, x):
h1 = F.relu(self.fc1(x))
h2 = F.relu(self.fc2(h1))
adv = self.fc3_adv(h2)
val = self.fc3_val(h2).expand(-1, adv.size(1))
output = val + adv - adv.mean(1, keepdim=True).expand(-1, adv.size(1))
return output
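# The mean subtraction above is the standard dueling identity
# Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)), which pins the advantage stream
# so V and A are identifiable. Worked example: V = 1, A = [0, 2, 0, 2] gives
# mean_a A = 1 and Q = [0, 2, 0, 2].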
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_in': 4, 'n_mid': 4, 'n_out': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + x2, xmask)
tmp6 = tl.load(in_ptr2 + 4 * x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp12 = tmp10 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = tmp5 - tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (1, 4), (4, 1))
assert_size_stride(primals_9, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 4),
(1, 4), 0), out=buf0)
del primals_1
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(16)](buf1, primals_2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_0[grid(16)](buf3, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf3, reinterpret_tensor(primals_8, (4, 1), (1, 4
), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_sub_1[grid(16)](buf5, primals_9, buf4, buf6,
16, XBLOCK=16, num_warps=1, num_stages=1)
del buf4
del buf5
del primals_9
return buf6, primals_3, buf1, buf3, primals_8, primals_6, primals_4
class DuelingNetNew(nn.Module):
def __init__(self, n_in, n_mid, n_out):
super(DuelingNetNew, self).__init__()
self.fc1 = nn.Linear(n_in, n_mid)
self.fc2 = nn.Linear(n_mid, n_mid)
self.fc3_adv = nn.Linear(n_mid, n_out)
self.fc3_val = nn.Linear(n_mid, 1)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_3 = self.fc2.weight
primals_5 = self.fc2.bias
primals_4 = self.fc3_adv.weight
primals_7 = self.fc3_adv.bias
primals_8 = self.fc3_val.weight
primals_9 = self.fc3_val.bias
primals_6 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
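# Hedged usage sketch (assumes CUDA; the asserts in call() fix every weight
# and input to the traced 4-dimensional sizes):
#
#   net = DuelingNetNew(n_in=4, n_mid=4, n_out=4).cuda()
#   q = net(torch.rand(4, 4, device='cuda'))  # -> (4, 4) Q-values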
| moriaki3193/Torch26 | DuelingNet | false | 7271 | ["MIT"] | 1 | fb75f6b6bb07c63fedb03fad7b647837eb40db2e | https://github.com/moriaki3193/Torch26/tree/fb75f6b6bb07c63fedb03fad7b647837eb40db2e | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, n_in, n_mid, n_out):
super().__init__()
self.fc1 = nn.Linear(n_in, n_mid)
self.fc2 = nn.Linear(n_mid, n_mid)
self.fc3_adv = nn.Linear(n_mid, n_out)
self.fc3_val = nn.Linear(n_mid, 1)
def forward(self, x):
h1 = F.relu(self.fc1(x))
h2 = F.relu(self.fc2(h1))
adv = self.fc3_adv(h2)
val = self.fc3_val(h2).expand(-1, adv.size(1))
output = val + adv - adv.mean(1, keepdim=True).expand(-1, adv.size(1))
return output
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
AveragePooling | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/4d/c4dx5dtglp5hpi3omo5xmukglcgv7f2ug2u4gm65rtchytndj27z.py
# Topologically Sorted Source Nodes: [masked_fill_, x_sum, x_num_1, truediv], Original ATen: [aten.masked_fill, aten.sum, aten.clamp, aten.div]
# Source node to ATen node mapping:
# masked_fill_ => full_default, where
# truediv => div
# x_num_1 => clamp_min
# x_sum => sum_1
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%expand, %full_default, %arg0_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%where, [1]), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%expand_1, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %clamp_min), kwargs = {})
triton_poi_fused_clamp_div_masked_fill_sum_0 = async_compile.triton('triton_poi_fused_clamp_div_masked_fill_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_masked_fill_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_div_masked_fill_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = (xindex // 16)
x3 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3 + (64*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (16 + x3 + (64*x2)), xmask)
tmp10 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (32 + x3 + (64*x2)), xmask)
tmp15 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (48 + x3 + (64*x2)), xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp4 = tl.where(tmp2, tmp1, tmp3)
tmp6 = tmp5 == tmp1
tmp8 = tl.where(tmp6, tmp1, tmp7)
tmp9 = tmp4 + tmp8
tmp11 = tmp10 == tmp1
tmp13 = tl.where(tmp11, tmp1, tmp12)
tmp14 = tmp9 + tmp13
tmp16 = tmp15 == tmp1
tmp18 = tl.where(tmp16, tmp1, tmp17)
tmp19 = tmp14 + tmp18
tmp20 = 1.0
tmp21 = tmp0 == tmp20
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp5 == tmp20
tmp24 = tmp23.to(tl.float32)
tmp25 = tmp22 + tmp24
tmp26 = tmp10 == tmp20
tmp27 = tmp26.to(tl.float32)
tmp28 = tmp25 + tmp27
tmp29 = tmp15 == tmp20
tmp30 = tmp29.to(tl.float32)
tmp31 = tmp28 + tmp30
tmp32 = triton_helpers.maximum(tmp31, tmp20)
tmp33 = tmp19 / tmp32
tl.store(out_ptr0 + (x4), tmp33, xmask)
''', device_str='cuda')
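# The kernel above fuses the masked mean over the four subitems: entries whose
# mask value equals 0.0 are replaced by 0.0 before summation (tmp2/tmp4 and
# friends), tmp22..tmp31 count the mask entries equal to 1.0, the count is
# clamped to at least 1.0, and the sum is divided by it.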
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [masked_fill_, x_sum, x_num_1, truediv], Original ATen: [aten.masked_fill, aten.sum, aten.clamp, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_clamp_div_masked_fill_sum_0.run(arg1_1, arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class AveragePooling(nn.Module):
def __init__(self):
super(AveragePooling, self).__init__()
"""
(item, subitem) can be (word, characters), or (sentence, words)
x: num_items x max_subitem_size x input_size
x_mask: num_items x max_subitem_size
return num_items x input_size
"""
def forward(self, x, x_mask):
"""
x_output: num_items x input_size x 1 --> num_items x input_size
"""
x_now = x.clone()
empty_mask = x_mask.eq(0).unsqueeze(2).expand_as(x_now)
x_now.data.masked_fill_(empty_mask.data, 0)
x_sum = torch.sum(x_now, 1)
x_num = torch.sum(x_mask.eq(1).float(), 1).unsqueeze(1).expand_as(x_sum
)
x_num = torch.clamp(x_num, min=1)
return x_sum / x_num
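# Minimal eager sketch of the same masked averaging (illustrative; the tensor
# values here are hypothetical):
#
#   x = torch.rand(2, 3, 5)                               # 2 items, 3 subitems
#   x_mask = torch.tensor([[1., 1., 0.], [1., 0., 0.]])
#   out = AveragePooling()(x, x_mask)  # rows averaged over 2 and 1 subitems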
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_div_masked_fill_sum_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr1 + (x3 + 64 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr1 + (16 + x3 + 64 * x2), xmask)
tmp10 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr1 + (32 + x3 + 64 * x2), xmask)
tmp15 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr1 + (48 + x3 + 64 * x2), xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp4 = tl.where(tmp2, tmp1, tmp3)
tmp6 = tmp5 == tmp1
tmp8 = tl.where(tmp6, tmp1, tmp7)
tmp9 = tmp4 + tmp8
tmp11 = tmp10 == tmp1
tmp13 = tl.where(tmp11, tmp1, tmp12)
tmp14 = tmp9 + tmp13
tmp16 = tmp15 == tmp1
tmp18 = tl.where(tmp16, tmp1, tmp17)
tmp19 = tmp14 + tmp18
tmp20 = 1.0
tmp21 = tmp0 == tmp20
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp5 == tmp20
tmp24 = tmp23.to(tl.float32)
tmp25 = tmp22 + tmp24
tmp26 = tmp10 == tmp20
tmp27 = tmp26.to(tl.float32)
tmp28 = tmp25 + tmp27
tmp29 = tmp15 == tmp20
tmp30 = tmp29.to(tl.float32)
tmp31 = tmp28 + tmp30
tmp32 = triton_helpers.maximum(tmp31, tmp20)
tmp33 = tmp19 / tmp32
tl.store(out_ptr0 + x4, tmp33, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_masked_fill_sum_0[grid(64)](arg1_1,
arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class AveragePoolingNew(nn.Module):
def __init__(self):
super(AveragePoolingNew, self).__init__()
"""
(item, subitem) can be (word, characters), or (sentence, words)
x: num_items x max_subitem_size x input_size
x_mask: num_items x max_subitem_size
return num_items x input_size
"""
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
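# Hedged usage sketch (assumes CUDA; the compiled call() expects exactly the
# traced shapes x: (4, 4, 4, 4) and x_mask: (4, 4, 4)):
#
#   pool = AveragePoolingNew().cuda()
#   out = pool(torch.rand(4, 4, 4, 4, device='cuda'),
#              torch.ones(4, 4, 4, device='cuda'))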
| mpandeydev/SDnetmod | AveragePooling | false | 7272 | ["MIT"] | 1 | c8cdf6150e3cd28330359a7d81df236729522a69 | https://github.com/mpandeydev/SDnetmod/tree/c8cdf6150e3cd28330359a7d81df236729522a69 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
"""
(item, subitem) can be (word, characters), or (sentence, words)
x: num_items x max_subitem_size x input_size
x_mask: num_items x max_subitem_size
return num_items x input_size
"""
def forward(self, x, x_mask):
"""
x_output: num_items x input_size x 1 --> num_items x input_size
"""
x_now = x.clone()
empty_mask = x_mask.eq(0).unsqueeze(2).expand_as(x_now)
x_now.data.masked_fill_(empty_mask.data, 0)
x_sum = torch.sum(x_now, 1)
x_num = torch.sum(x_mask.eq(1).float(), 1).unsqueeze(1).expand_as(x_sum
)
x_num = torch.clamp(x_num, min=1)
return x_sum / x_num
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return []
|
SinenetComponent | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/un/cun6ihhwjyyhox6wzkegtighr4a3swoiif6bzjf2uy6ughgxorz7.py
# Topologically Sorted Source Nodes: [i_f, i_f_t, deg, s, mul_2], Original ATen: [aten.mul, aten.add, aten.sin]
# Source node to ATen node mapping:
# deg => add
# i_f => mul
# i_f_t => mul_1
# mul_2 => mul_2
# s => sin
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (4, %primals_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
# %sin : [num_users=1] = call_function[target=torch.ops.aten.sin.default](args = (%add,), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %sin), kwargs = {})
triton_poi_fused_add_mul_sin_0 = async_compile.triton('triton_poi_fused_add_mul_sin_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sin_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sin_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr1 + (x0), xmask)
tmp5 = tl.load(in_ptr2 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp8 = tl.load(in_ptr3 + (0))
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp1 = 4.0
tmp2 = tmp1 * tmp0
tmp4 = tmp2 * tmp3
tmp7 = tmp4 + tmp6
tmp10 = tl_math.sin(tmp7)
tmp11 = tmp9 * tmp10
tl.store(out_ptr0 + (x0), tmp7, xmask)
tl.store(out_ptr1 + (x0), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ap/capawthl36q53mm4p74ufqms3xhssnufdiwutbdd2igrcv5r7q7b.py
# Topologically Sorted Source Nodes: [h_SBT, h_SB], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# h_SB => sum_1
# h_SBT => mul_3
# Graph fragment:
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_5), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_3, [-1]), kwargs = {})
triton_poi_fused_mul_sum_1 = async_compile.triton('triton_poi_fused_mul_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x0), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, ), (1, ))
assert_size_stride(primals_4, (1, ), (1, ))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [i_f, i_f_t, deg, s, mul_2], Original ATen: [aten.mul, aten.add, aten.sin]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_sin_0.run(primals_1, primals_2, primals_3, primals_4, buf0, buf1, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
del primals_3
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_SBT, h_SB], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_1.run(buf1, primals_5, buf2, 64, grid=grid(64), stream=stream0)
return (buf2, buf1, primals_4, primals_5, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class SinenetComponent(torch.nn.Module):
def __init__(self, time_len, i):
super().__init__()
self.time_len = time_len
self.i = i
self.t_wav = 1.0 / 16000
self.log_f_mean = 5.02654
self.log_f_std = 0.373288
self.a = torch.nn.Parameter(torch.Tensor(1))
self.phi = torch.nn.Parameter(torch.Tensor(1))
def forward(self, x, f, t):
i_f = torch.mul(self.i, f)
i_f_t = torch.mul(i_f, t)
deg = torch.add(i_f_t, self.phi)
s = torch.sin(deg)
self.W = torch.mul(self.a, s)
h_SBT = torch.mul(self.W, x)
h_SB = torch.sum(h_SBT, dim=-1, keepdim=False)
return h_SB
def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'time_len': 4, 'i': 4}]
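# Hedged usage sketch (illustration only, not part of the original record).
# `a` and `phi` are allocated uninitialized above (torch.Tensor(1)), so this
# sketch fills them before calling forward; the helper name is an assumption.
def _demo_sinenet_component():
    m = SinenetComponent(time_len=4, i=4)
    with torch.no_grad():
        m.a.fill_(1.0)
        m.phi.fill_(0.0)
    x, f, t = get_inputs()
    h = m(x, f, t)  # sum over the last dim of a * sin(i*f*t + phi) * x
    assert h.shape == (4, 4, 4)
    return h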
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
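# Descriptive note (added): the kernel below fuses the pointwise chain
# (4 * in0) * in1 + phi -> sin -> a * sin(...), writing both the phase
# (out_ptr0) and the scaled sine (out_ptr1) so the backward pass can reuse
# the saved phase.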
@triton.jit
def triton_poi_fused_add_mul_sin_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp5 = tl.load(in_ptr2 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp8 = tl.load(in_ptr3 + 0)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp1 = 4.0
tmp2 = tmp1 * tmp0
tmp4 = tmp2 * tmp3
tmp7 = tmp4 + tmp6
tmp10 = tl_math.sin(tmp7)
tmp11 = tmp9 * tmp10
tl.store(out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
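# Descriptive note (added): the next kernel takes the elementwise product of
# its two inputs and reduces over the innermost dimension (fixed size 4,
# manually unrolled), producing one output per group of four elements.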
@triton.jit
def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x0, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sin_0[grid(256)](primals_1, primals_2,
primals_3, primals_4, buf0, buf1, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_1
del primals_2
del primals_3
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_sum_1[grid(64)](buf1, primals_5, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
return buf2, buf1, primals_4, primals_5, buf0
class SinenetComponentNew(torch.nn.Module):
def __init__(self, time_len, i):
super().__init__()
self.time_len = time_len
self.i = i
self.t_wav = 1.0 / 16000
self.log_f_mean = 5.02654
self.log_f_std = 0.373288
self.a = torch.nn.Parameter(torch.Tensor(1))
self.phi = torch.nn.Parameter(torch.Tensor(1))
def forward(self, input_0, input_1, input_2):
primals_3 = self.a
primals_4 = self.phi
primals_1 = input_0
primals_2 = input_1
primals_5 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| moquan/22_Nov_2018 | SinenetComponent | false | 7,273 | [
"MIT"
] | 1 | eaa81bf5050d74612fe1322abcdb26a0a919e976 | https://github.com/moquan/22_Nov_2018/tree/eaa81bf5050d74612fe1322abcdb26a0a919e976 | import torch
class Model(torch.nn.Module):
def __init__(self, time_len, i):
super().__init__()
self.time_len = time_len
self.i = i
self.t_wav = 1.0 / 16000
self.log_f_mean = 5.02654
self.log_f_std = 0.373288
self.a = torch.nn.Parameter(torch.Tensor(1))
self.phi = torch.nn.Parameter(torch.Tensor(1))
def forward(self, x, f, t):
i_f = torch.mul(self.i, f)
i_f_t = torch.mul(i_f, t)
deg = torch.add(i_f_t, self.phi)
s = torch.sin(deg)
self.W = torch.mul(self.a, s)
h_SBT = torch.mul(self.W, x)
h_SB = torch.sum(h_SBT, dim=-1, keepdim=False)
return h_SB
def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Net3 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ix/cixxyusyg44s2hkoufcgbrv3ix5ookwqjl4ia3xkv7bdqi4yrzus.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = (xindex // 1600)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/op/coptu6xep3awc4lajb4xivopppqmjtx3zy7ebtazm45rqvyeknds.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_3 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = (xindex // 1200)
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + (1216*x2)), tmp4, xmask)
tl.store(out_ptr1 + (x3 + (1280*x2)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/as/casrc7bf7ghsendgi7tkqxk3hj4ic6aqb4rmkxzuk5dhbidznia7.py
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.relu, aten.view]
# Source node to ATen node mapping:
# out_3 => relu_1
# out_4 => view_4
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %view_4 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%relu_1, [64, 300]), kwargs = {})
triton_poi_fused_relu_view_2 = async_compile.triton('triton_poi_fused_relu_view_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_view_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = (xindex // 300)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (300*(x1 % 4)) + (1216*(x1 // 4))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/kk/ckkc5xafafjuch75gwnhuryooqjc3zkq5tebbj3xugoo6gpc6wsg.py
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_7 => relu_3
# Graph fragment:
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_7,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 70
x2 = xindex % 1120
x3 = (xindex // 1120)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x2 + (1152*x3)), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (400, 4), (4, 1))
assert_size_stride(primals_2, (400, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (300, 400), (400, 1))
assert_size_stride(primals_5, (300, ), (1, ))
assert_size_stride(primals_6, (300, 300), (300, 1))
assert_size_stride(primals_7, (300, ), (1, ))
assert_size_stride(primals_8, (70, 300), (300, 1))
assert_size_stride(primals_9, (70, ), (1, ))
assert_size_stride(primals_10, (1, 70), (70, 1))
assert_size_stride(primals_11, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0); del buf0 # reuse
buf15 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf15, 25600, grid=grid(25600), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0), reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1), torch.float32)
buf14 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_5, buf3, buf14, 19200, grid=grid(19200), stream=stream0)
del primals_5
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.relu, aten.view]
triton_poi_fused_relu_view_2.run(buf3, buf4, 19200, grid=grid(19200), stream=stream0)
buf5 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (300, 300), (1, 300), 0), out=buf5)
buf6 = buf3; del buf3 # reuse
buf13 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf5, primals_7, buf6, buf13, 19200, grid=grid(19200), stream=stream0)
del primals_7
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [out_5, out_6], Original ATen: [aten.relu, aten.view]
triton_poi_fused_relu_view_2.run(buf6, buf7, 19200, grid=grid(19200), stream=stream0)
del buf6
buf8 = empty_strided_cuda((64, 70), (70, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf7, reinterpret_tensor(primals_8, (300, 70), (1, 300), 0), out=buf8)
buf9 = reinterpret_tensor(buf8, (4, 4, 4, 70), (1120, 280, 70, 1), 0); del buf8 # reuse
buf12 = empty_strided_cuda((4, 4, 4, 70), (1152, 280, 70, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_3.run(buf9, primals_9, buf12, 4480, grid=grid(4480), stream=stream0)
del primals_9
buf11 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf9, (64, 70), (70, 1), 0), reinterpret_tensor(primals_10, (70, 1), (1, 70), 0), alpha=1, beta=1, out=buf11)
del primals_11
return (reinterpret_tensor(buf11, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 400), (400, 1), 0), buf4, buf7, reinterpret_tensor(buf9, (64, 70), (70, 1), 0), primals_10, buf12, primals_8, buf13, primals_6, buf14, primals_4, buf15, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((400, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((300, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((300, 300), (300, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((70, 300), (300, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((70, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, 70), (70, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class Net3(nn.Module):
"""
Net3 is a neural network consisting of four hidden layers with sizes 400,
300, 300 and 70
"""
layer_sizes = [400, 300, 300, 70]
hidden1 = 400
hidden2 = 300
hidden3 = 300
hidden4 = 70
def __init__(self, input_size):
super(Net3, self).__init__()
self.fc1 = nn.Linear(input_size, self.hidden1)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(self.hidden1, self.hidden2)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(self.hidden2, self.hidden3)
self.relu3 = nn.ReLU()
self.fc4 = nn.Linear(self.hidden3, self.hidden4)
self.relu4 = nn.ReLU()
self.fc5 = nn.Linear(self.hidden4, 1)
def forward(self, x):
out = self.fc1(x)
out = self.relu1(out)
out = self.fc2(out)
out = self.relu2(out)
out = self.fc3(out)
out = self.relu3(out)
out = self.fc4(out)
out = self.relu4(out)
out = self.fc5(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4}]
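# Hedged usage sketch (illustration only, not part of the original record;
# the helper name is an assumption). nn.Linear acts on the last dimension,
# so any leading batch shape is preserved through the stack of layers.
def _demo_net3():
    net = Net3(input_size=4)
    x = torch.rand(4, 4, 4, 4)
    y = net(x)
    assert y.shape == (4, 4, 4, 1)  # fc5 maps hidden4=70 down to 1
    return y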
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
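# Descriptive note (added): the fused kernels below add the layer bias,
# apply ReLU, and record the boolean (activation <= 0) mask that ReLU's
# backward (threshold_backward) needs; triton_poi_fused_relu_view_2 only
# repacks the padded activations into a contiguous view for the next matmul.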
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = xindex // 1200
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = xindex // 300
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 70
x2 = xindex % 1120
x3 = xindex // 1120
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1152 * x3), tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (400, 4), (4, 1))
assert_size_stride(primals_2, (400,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (300, 400), (400, 1))
assert_size_stride(primals_5, (300,), (1,))
assert_size_stride(primals_6, (300, 300), (300, 1))
assert_size_stride(primals_7, (300,), (1,))
assert_size_stride(primals_8, (70, 300), (300, 1))
assert_size_stride(primals_9, (70,), (1,))
assert_size_stride(primals_10, (1, 70), (70, 1))
assert_size_stride(primals_11, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0)
del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0)
del buf0
buf15 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(25600)](buf1,
primals_2, buf15, 25600, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1),
torch.float32)
buf14 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(19200)](buf2,
primals_5, buf3, buf14, 19200, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_5
buf4 = buf2
del buf2
        triton_poi_fused_relu_view_2[grid(19200)](buf3, buf4, 19200,
            XBLOCK=128, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
        extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (300, 300),
            (1, 300), 0), out=buf5)
buf6 = buf3
del buf3
buf13 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(19200)](buf5,
primals_7, buf6, buf13, 19200, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_7
buf7 = buf5
del buf5
        triton_poi_fused_relu_view_2[grid(19200)](buf6, buf7, 19200,
            XBLOCK=128, num_warps=4, num_stages=1)
del buf6
buf8 = empty_strided_cuda((64, 70), (70, 1), torch.float32)
        extern_kernels.mm(buf7, reinterpret_tensor(primals_8, (300, 70),
            (1, 300), 0), out=buf8)
buf9 = reinterpret_tensor(buf8, (4, 4, 4, 70), (1120, 280, 70, 1), 0)
del buf8
        buf12 = empty_strided_cuda((4, 4, 4, 70), (1152, 280, 70, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_3[grid(4480)](buf9,
primals_9, buf12, 4480, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf11 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_11, reinterpret_tensor(buf9, (64, 70),
            (70, 1), 0), reinterpret_tensor(primals_10, (70, 1), (1, 70), 0),
            alpha=1, beta=1, out=buf11)
del primals_11
return reinterpret_tensor(buf11, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 400), (400, 1), 0
), buf4, buf7, reinterpret_tensor(buf9, (64, 70), (70, 1), 0
), primals_10, buf12, primals_8, buf13, primals_6, buf14, primals_4, buf15
class Net3New(nn.Module):
"""
Net3 is a neural network consisting of four hidden layers with sizes 400,
300, 300 and 70
"""
layer_sizes = [400, 300, 300, 70]
hidden1 = 400
hidden2 = 300
hidden3 = 300
hidden4 = 70
def __init__(self, input_size):
super(Net3New, self).__init__()
self.fc1 = nn.Linear(input_size, self.hidden1)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(self.hidden1, self.hidden2)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(self.hidden2, self.hidden3)
self.relu3 = nn.ReLU()
self.fc4 = nn.Linear(self.hidden3, self.hidden4)
self.relu4 = nn.ReLU()
self.fc5 = nn.Linear(self.hidden4, 1)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_10 = self.fc5.weight
primals_11 = self.fc5.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| moritzschaefer/pavooc | Net3 | false | 7,274 | [
"MIT"
] | 1 | 735f5455f9a95a5734436a24e2aa92cf600c91af | https://github.com/moritzschaefer/pavooc/tree/735f5455f9a95a5734436a24e2aa92cf600c91af | import torch
from torch import nn
class Model(nn.Module):
"""
Net3 is a neural network consisting of four hidden layers with sizes 400,
300, 300 and 70
"""
layer_sizes = [400, 300, 300, 70]
hidden1 = 400
hidden2 = 300
hidden3 = 300
hidden4 = 70
def __init__(self, input_size):
super().__init__()
self.fc1 = nn.Linear(input_size, self.hidden1)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(self.hidden1, self.hidden2)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(self.hidden2, self.hidden3)
self.relu3 = nn.ReLU()
self.fc4 = nn.Linear(self.hidden3, self.hidden4)
self.relu4 = nn.ReLU()
self.fc5 = nn.Linear(self.hidden4, 1)
def forward(self, x):
out = self.fc1(x)
out = self.relu1(out)
out = self.fc2(out)
out = self.relu2(out)
out = self.fc3(out)
out = self.relu3(out)
out = self.fc4(out)
out = self.relu4(out)
out = self.fc5(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
MaxPooling | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ye/cyer3e3q24cnknj35g4jhiqyzqlzuynppjhp52ioic4qspesjovr.py
# Topologically Sorted Source Nodes: [masked_fill_, max_1, eq_1, masked_fill__1], Original ATen: [aten.masked_fill, aten.max, aten.eq]
# Source node to ATen node mapping:
# eq_1 => eq_1
# masked_fill_ => full_default, where
# masked_fill__1 => full_default_1, where_1
# max_1 => max_1
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1000000.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%expand, %full_default, %arg1_1), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%where, 1), kwargs = {})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%getitem, -1000000.0), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_1, %getitem), kwargs = {})
triton_poi_fused_eq_masked_fill_max_0 = async_compile.triton('triton_poi_fused_eq_masked_fill_max_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_masked_fill_max_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eq_masked_fill_max_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0 + (16*x1)), xmask)
tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (4 + x0 + (16*x1)), xmask)
tmp11 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (8 + x0 + (16*x1)), xmask)
tmp16 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (12 + x0 + (16*x1)), xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp4 = -1000000.0
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp6 == tmp1
tmp9 = tl.where(tmp7, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp12 = tmp11 == tmp1
tmp14 = tl.where(tmp12, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp17 = tmp16 == tmp1
tmp19 = tl.where(tmp17, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp20 == tmp4
tmp22 = tl.where(tmp21, tmp1, tmp20)
tl.store(in_out_ptr0 + (x2), tmp22, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [masked_fill_, max_1, eq_1, masked_fill__1], Original ATen: [aten.masked_fill, aten.max, aten.eq]
stream0 = get_raw_stream(0)
triton_poi_fused_eq_masked_fill_max_0.run(buf1, arg0_1, arg1_1, 16, grid=grid(16), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MaxPooling(nn.Module):
def __init__(self):
super(MaxPooling, self).__init__()
self.MIN = -1000000.0
"""
(item, subitem) can be (word, characters), or (sentence, words)
x: num_items x max_subitem_size x input_size
x_mask: num_items x max_subitem_size
return num_items x input_size
"""
def forward(self, x, x_mask):
"""
x_output: num_items x input_size x 1 --> num_items x input_size
"""
empty_mask = x_mask.eq(0).unsqueeze(2).expand_as(x)
x_now = x.clone()
x_now.data.masked_fill_(empty_mask.data, self.MIN)
x_output = x_now.max(1)[0]
x_output.data.masked_fill_(x_output.data.eq(self.MIN), 0)
return x_output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
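# Hedged usage sketch (illustration only, not part of the original record;
# the helper name is an assumption). Fully masked rows would otherwise
# return self.MIN, so the second masked_fill_ resets them to zero.
def _demo_max_pooling():
    pool = MaxPooling()
    x = torch.rand(2, 3, 5)
    x_mask = torch.tensor([[1.0, 1.0, 0.0], [0.0, 0.0, 0.0]])
    out = pool(x, x_mask)
    assert out.shape == (2, 5)
    assert torch.all(out[1] == 0)  # the fully masked item collapses to zeros
    return out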
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
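# Descriptive note (added): fused masked max; positions whose mask is 0 are
# treated as -1e6, the max runs over the unrolled subitem dimension (size 4),
# and rows still at -1e6 afterwards (fully masked) are reset to 0.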
@triton.jit
def triton_poi_fused_eq_masked_fill_max_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask)
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask)
    tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask)
    tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp4 = -1000000.0
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp6 == tmp1
tmp9 = tl.where(tmp7, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp12 = tmp11 == tmp1
tmp14 = tl.where(tmp12, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp17 = tmp16 == tmp1
tmp19 = tl.where(tmp17, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp20 == tmp4
tmp22 = tl.where(tmp21, tmp1, tmp20)
tl.store(in_out_ptr0 + x2, tmp22, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_eq_masked_fill_max_0[grid(16)](buf1, arg0_1,
arg1_1, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class MaxPoolingNew(nn.Module):
def __init__(self):
super(MaxPoolingNew, self).__init__()
self.MIN = -1000000.0
"""
(item, subitem) can be (word, characters), or (sentence, words)
x: num_items x max_subitem_size x input_size
x_mask: num_items x max_subitem_size
return num_items x input_size
"""
def forward(self, input_0, input_1):
arg1_1 = input_0
arg0_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
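# Hedged usage sketch (illustration only, not part of the original record):
# the compiled path allocates CUDA buffers and asserts the exact input shapes
# checked in call(), so it only runs on a CUDA device. The helper name is an
# assumption of this example.
def _demo_max_pooling_cuda():
    if not torch.cuda.is_available():
        return None
    pool = MaxPoolingNew()
    x = torch.rand(4, 4, 4, device='cuda')
    x_mask = torch.rand(4, 4, device='cuda')
    return pool(x, x_mask)  # shape (4, 4)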
| mpandeydev/SDnetmod | MaxPooling | false | 7,275 | [
"MIT"
] | 1 | c8cdf6150e3cd28330359a7d81df236729522a69 | https://github.com/mpandeydev/SDnetmod/tree/c8cdf6150e3cd28330359a7d81df236729522a69 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.MIN = -1000000.0
"""
(item, subitem) can be (word, characters), or (sentence, words)
x: num_items x max_subitem_size x input_size
x_mask: num_items x max_subitem_size
return num_items x input_size
"""
def forward(self, x, x_mask):
"""
x_output: num_items x input_size x 1 --> num_items x input_size
"""
empty_mask = x_mask.eq(0).unsqueeze(2).expand_as(x)
x_now = x.clone()
x_now.data.masked_fill_(empty_mask.data, self.MIN)
x_output = x_now.max(1)[0]
x_output.data.masked_fill_(x_output.data.eq(self.MIN), 0)
return x_output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return []
|
Actor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ky/cky64l574tkwxzjewzevqyhty73x4t3q4p6d2tu2humfvstjwiaa.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (32, 4), (4, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 32), (32, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (4, 32), (32, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf0 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 2048, grid=grid(2048), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf5, 2048, grid=grid(2048), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(primals_6, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(buf3, (64, 32), (32, 1), 0), primals_6, buf5, primals_4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
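# Note (added): benchmark_compiled_module is the stock Inductor benchmark
# harness; rand_strided materializes inputs at exactly the shapes and strides
# the assert_size_stride guards demand, and print_performance times `call`.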
| import torch
import torch.nn.functional as F
import torch.nn as nn
class Actor(torch.nn.Module):
def __init__(self, numObs, numActions):
super(Actor, self).__init__()
self.actor_input = nn.Linear(numObs, 32)
self.actor_fc1 = nn.Linear(32, 32)
self.actor_output = nn.Linear(32, numActions)
def forward(self, x):
x = F.relu(self.actor_input(x))
x = F.relu(self.actor_fc1(x))
logits = self.actor_output(x)
return logits
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'numObs': 4, 'numActions': 4}]
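def _demo_actor():
    # Hedged usage sketch (an addition, not part of the captured repo code):
    # run the recorded sample input through the eager Actor. nn.Linear acts
    # on the last axis, so the logits keep the input's leading [4, 4, 4] dims.
    model = Actor(numObs=4, numActions=4)
    logits = model(get_inputs()[0])
    assert logits.shape == (4, 4, 4, 4)
    return logits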
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # result unused: every element is in-bounds, so the loads/stores below pass mask=None
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
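# What the kernel above computes, in eager terms (note added): with
# in_out_ptr0 holding the pre-bias matmul result and in_ptr0 the 32-wide
# bias, it does out = relu(x + bias) in place and also stores the boolean
# mask (out <= 0) that aten.threshold_backward consumes during autograd.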
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (32, 4), (4, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 32), (32, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (4, 32), (32, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf1,
primals_2, buf6, 2048, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0),
reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf3,
primals_5, buf5, 2048, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 32),
(32, 1), 0), reinterpret_tensor(primals_6, (32, 4), (1, 32), 0),
alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(
buf3, (64, 32), (32, 1), 0), primals_6, buf5, primals_4, buf6
class ActorNew(torch.nn.Module):
def __init__(self, numObs, numActions):
super(ActorNew, self).__init__()
self.actor_input = nn.Linear(numObs, 32)
self.actor_fc1 = nn.Linear(32, 32)
self.actor_output = nn.Linear(32, numActions)
def forward(self, input_0):
primals_1 = self.actor_input.weight
primals_2 = self.actor_input.bias
primals_4 = self.actor_fc1.weight
primals_5 = self.actor_fc1.bias
primals_6 = self.actor_output.weight
primals_7 = self.actor_output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
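# Usage note (added): ActorNew is a drop-in for the eager Actor but routes
# forward() through the compiled `call`; a CUDA device is assumed because the
# wrapper allocates on cuda:0, e.g.
#   logits = ActorNew(4, 4).cuda()(torch.rand(4, 4, 4, 4, device='cuda'))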
| mpgussert/fundamentalRL | Actor | false | 7,276 | [
"MIT"
] | 1 | 4f45436226e0823c21cac316dec8bbf1df697467 | https://github.com/mpgussert/fundamentalRL/tree/4f45436226e0823c21cac316dec8bbf1df697467 | import torch
import torch.nn.functional as F
import torch.nn as nn
class Model(torch.nn.Module):
def __init__(self, numObs, numActions):
super().__init__()
self.actor_input = nn.Linear(numObs, 32)
self.actor_fc1 = nn.Linear(32, 32)
self.actor_output = nn.Linear(32, numActions)
def forward(self, x):
x = F.relu(self.actor_input(x))
x = F.relu(self.actor_fc1(x))
logits = self.actor_output(x)
return logits
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
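# Note (added): nn.Linear broadcasts over all leading dimensions, so the
# [4, 4, 4, 4] input is effectively 64 independent 4-vectors -- exactly the
# (64, 4) view the compiled wrapper builds with reinterpret_tensor.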
|
Agent | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ky/cky64l574tkwxzjewzevqyhty73x4t3q4p6d2tu2humfvstjwiaa.py
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# y => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (32, 4), (4, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 32), (32, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (4, 32), (32, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (32, 4), (4, 1))
assert_size_stride(primals_9, (32, ), (1, ))
assert_size_stride(primals_10, (32, 32), (32, 1))
assert_size_stride(primals_11, (32, ), (1, ))
assert_size_stride(primals_12, (1, 32), (32, 1))
assert_size_stride(primals_13, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf0 # reuse
buf14 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf14, 2048, grid=grid(2048), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf2 # reuse
buf13 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf13, 2048, grid=grid(2048), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(primals_6, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 32), (1, 4), 0), out=buf5)
del primals_8
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf5 # reuse
buf12 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [z], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf6, primals_9, buf12, 2048, grid=grid(2048), stream=stream0)
del primals_9
buf7 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf6, (64, 32), (32, 1), 0), reinterpret_tensor(primals_10, (32, 32), (1, 32), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf7 # reuse
buf11 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [z_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf8, primals_11, buf11, 2048, grid=grid(2048), stream=stream0)
del primals_11
buf10 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [value], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, reinterpret_tensor(buf8, (64, 32), (32, 1), 0), reinterpret_tensor(primals_12, (32, 1), (1, 32), 0), alpha=1, beta=1, out=buf10)
del primals_13
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf10, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(buf6, (64, 32), (32, 1), 0), reinterpret_tensor(buf8, (64, 32), (32, 1), 0), primals_12, buf11, primals_10, buf12, primals_6, buf13, primals_4, buf14, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((32, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((1, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class Agent(torch.nn.Module):
def __init__(self, numObs, numActions):
super(Agent, self).__init__()
self.critic_input = nn.Linear(numObs, 32)
self.critic_fc1 = nn.Linear(32, 32)
self.critic_output = nn.Linear(32, 1)
self.actor_input = nn.Linear(numObs, 32)
self.actor_fc1 = nn.Linear(32, 32)
self.actor_output = nn.Linear(32, numActions)
def forward(self, x):
y = F.relu(self.actor_input(x))
y = F.relu(self.actor_fc1(y))
logits = self.actor_output(y)
z = F.relu(self.critic_input(x))
z = F.relu(self.critic_fc1(z))
value = self.critic_output(z)
return logits, value
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'numObs': 4, 'numActions': 4}]
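def _demo_agent():
    # Hedged usage sketch (my addition): the actor head emits one logit per
    # action and the critic head a scalar value, each applied over the last
    # axis of the recorded 4-D sample input.
    agent = Agent(numObs=4, numActions=4)
    logits, value = agent(get_inputs()[0])
    assert logits.shape == (4, 4, 4, 4) and value.shape == (4, 4, 4, 1)
    return logits, value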
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # result unused: every element is in-bounds, so the loads/stores below pass mask=None
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
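# Note (added): this one kernel is launched four times in call() below --
# twice for the actor trunk and twice for the critic trunk -- because every
# hidden layer shares the same 64x32 activation shape and 32-wide bias, so a
# single fused bias-add/ReLU/backward-mask epilogue covers all of them.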
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (32, 4), (4, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 32), (32, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (4, 32), (32, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (32, 4), (4, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (32, 32), (32, 1))
assert_size_stride(primals_11, (32,), (1,))
assert_size_stride(primals_12, (1, 32), (32, 1))
assert_size_stride(primals_13, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0)
del buf0
buf14 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool
)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf1,
primals_2, buf14, 2048, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0),
reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0)
del buf2
buf13 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool
)
triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf3,
primals_5, buf13, 2048, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 32),
(32, 1), 0), reinterpret_tensor(primals_6, (32, 4), (1, 32), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 32), (1, 4), 0), out=buf5)
del primals_8
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 32), (512, 128, 32, 1), 0)
del buf5
buf12 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool
)
triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf6,
primals_9, buf12, 2048, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf7 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf6, (64, 32), (32, 1), 0),
reinterpret_tensor(primals_10, (32, 32), (1, 32), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 32), (512, 128, 32, 1), 0)
del buf7
buf11 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool
)
triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf8,
primals_11, buf11, 2048, XBLOCK=128, num_warps=4, num_stages=1)
del primals_11
buf10 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf8, (64, 32),
(32, 1), 0), reinterpret_tensor(primals_12, (32, 1), (1, 32), 0
), alpha=1, beta=1, out=buf10)
del primals_13
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf10, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(
buf3, (64, 32), (32, 1), 0), reinterpret_tensor(buf6, (64, 32), (32,
1), 0), reinterpret_tensor(buf8, (64, 32), (32, 1), 0
), primals_12, buf11, primals_10, buf12, primals_6, buf13, primals_4, buf14
class AgentNew(torch.nn.Module):
def __init__(self, numObs, numActions):
super(AgentNew, self).__init__()
self.critic_input = nn.Linear(numObs, 32)
self.critic_fc1 = nn.Linear(32, 32)
self.critic_output = nn.Linear(32, 1)
self.actor_input = nn.Linear(numObs, 32)
self.actor_fc1 = nn.Linear(32, 32)
self.actor_output = nn.Linear(32, numActions)
def forward(self, input_0):
primals_1 = self.critic_input.weight
primals_2 = self.critic_input.bias
primals_4 = self.critic_fc1.weight
primals_5 = self.critic_fc1.bias
primals_12 = self.critic_output.weight
primals_13 = self.critic_output.bias
primals_8 = self.actor_input.weight
primals_9 = self.actor_input.bias
primals_10 = self.actor_fc1.weight
primals_11 = self.actor_fc1.bias
primals_6 = self.actor_output.weight
primals_7 = self.actor_output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0], output[1]
| mpgussert/fundamentalRL | Agent | false | 7,277 | [
"MIT"
] | 1 | 4f45436226e0823c21cac316dec8bbf1df697467 | https://github.com/mpgussert/fundamentalRL/tree/4f45436226e0823c21cac316dec8bbf1df697467 | import torch
import torch.nn.functional as F
import torch.nn as nn
class Model(torch.nn.Module):
def __init__(self, numObs, numActions):
super().__init__()
self.critic_input = nn.Linear(numObs, 32)
self.critic_fc1 = nn.Linear(32, 32)
self.critic_output = nn.Linear(32, 1)
self.actor_input = nn.Linear(numObs, 32)
self.actor_fc1 = nn.Linear(32, 32)
self.actor_output = nn.Linear(32, numActions)
def forward(self, x):
y = F.relu(self.actor_input(x))
y = F.relu(self.actor_fc1(y))
logits = self.actor_output(y)
z = F.relu(self.critic_input(x))
z = F.relu(self.critic_fc1(z))
value = self.critic_output(z)
return logits, value
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
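def _param_count():
    # Hedged sketch (my addition): the actor and critic trunks share no
    # weights, so the total is six Linear layers: actor
    # (4*32+32)+(32*32+32)+(32*4+4) = 1348 plus critic
    # (4*32+32)+(32*32+32)+(32*1+1) = 1249, i.e. 2597 parameters.
    return sum(p.numel() for p in Model(4, 4).parameters())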
|
cnn_7layer_alt | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/5j/c5ji4mfxenghd3ccczky5osir42aijmeisydrv7ufxv2edv4ktf6.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 8
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/m6/cm6p6okjwnkvhqumzqnzw3a4chp3uvjzvn7re4pqvpizj4cehwfz.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 8
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/is/ciswnsgoxwzttnk3n4uptq5a77i3prr6wqkpzjhju7e4d6ki4jvh.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/c6/cc623uyseovkuyagjqv6tbtxb6rcyfk2owm2q5p6nm42vcizngup.py
# Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# x_3 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/3d/c3daizw6k7n3mdqdhvpifdxm2baxmazxb7opdgejzyeaavnbkn3d.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_5 => relu_4
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_11), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (8, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (8, 8, 4, 4), (128, 16, 4, 1))
assert_size_stride(primals_5, (8, ), (1, ))
assert_size_stride(primals_6, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_7, (16, ), (1, ))
assert_size_stride(primals_8, (16, 16, 4, 4), (256, 16, 4, 1))
assert_size_stride(primals_9, (16, ), (1, ))
assert_size_stride(primals_10, (128, 16), (16, 1))
assert_size_stride(primals_11, (128, ), (1, ))
assert_size_stride(primals_12, (128, 128), (128, 1))
assert_size_stride(primals_13, (128, ), (1, ))
assert_size_stride(primals_14, (10, 128), (128, 1))
assert_size_stride(primals_15, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 4, 4), (128, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 512, grid=grid(512), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 8, 2, 2), (32, 4, 2, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 128, grid=grid(128), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 2, 2), (64, 4, 2, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 16, 1, 1), (16, 1, 1, 1))
buf7 = reinterpret_tensor(buf6, (4, 16, 1, 1), (16, 1, 64, 64), 0); del buf6 # reuse
buf13 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_3.run(buf7, primals_9, buf13, 64, grid=grid(64), stream=stream0)
del primals_9
buf8 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf7, (4, 16), (16, 1), 0), reinterpret_tensor(primals_10, (16, 128), (1, 16), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
triton_poi_fused_relu_4.run(buf9, primals_11, 512, grid=grid(512), stream=stream0)
del primals_11
buf10 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf9, reinterpret_tensor(primals_12, (128, 128), (1, 128), 0), out=buf10)
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu]
triton_poi_fused_relu_4.run(buf11, primals_13, 512, grid=grid(512), stream=stream0)
del primals_13
buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_15, buf11, reinterpret_tensor(primals_14, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf12)
del primals_15
return (buf12, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5, reinterpret_tensor(buf7, (4, 16), (16, 1), 0), buf9, buf11, primals_14, primals_12, primals_10, buf13, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((8, 8, 4, 4), (128, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((16, 16, 4, 4), (256, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((128, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((128, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((10, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class cnn_7layer_alt(nn.Module):
def __init__(self, in_ch, in_dim, width=2, linear_size=128):
super(cnn_7layer_alt, self).__init__()
self.conv1 = nn.Conv2d(in_ch, 4 * width, 3, stride=1, padding=1)
self.conv2 = nn.Conv2d(4 * width, 4 * width, 4, stride=2, padding=1)
self.conv3 = nn.Conv2d(4 * width, 8 * width, 3, stride=1, padding=1)
self.conv4 = nn.Conv2d(8 * width, 8 * width, 4, stride=2, padding=1)
self.fc1 = nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4),
linear_size)
self.fc2 = nn.Linear(linear_size, linear_size)
self.fc3 = nn.Linear(linear_size, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_ch': 4, 'in_dim': 4}]
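def _demo_cnn():
    # Hedged usage sketch (my addition): with in_ch=4, in_dim=4, width=2 the
    # two stride-2 convolutions shrink the 4x4 input to 2x2 and then 1x1,
    # leaving 16 features for fc1; the head is always 10-way.
    net = cnn_7layer_alt(in_ch=4, in_dim=4)
    out = net(get_inputs()[0])
    assert out.shape == (4, 10)
    return out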
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 8
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 8
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_3(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
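# Note (added): this 128-wide epilogue fuses the fully-connected bias add
# with ReLU in place and is launched twice in call() below (fc1 and fc2);
# the convolution epilogues above differ only in size and bias width.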
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (8, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (8, 8, 4, 4), (128, 16, 4, 1))
assert_size_stride(primals_5, (8,), (1,))
assert_size_stride(primals_6, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (16, 16, 4, 4), (256, 16, 4, 1))
assert_size_stride(primals_9, (16,), (1,))
assert_size_stride(primals_10, (128, 16), (16, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (128, 128), (128, 1))
assert_size_stride(primals_13, (128,), (1,))
assert_size_stride(primals_14, (10, 128), (128, 1))
assert_size_stride(primals_15, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 4, 4), (128, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(512)](buf1, primals_2, 512,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 8, 2, 2), (32, 4, 2, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(128)](buf3, primals_5, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 2, 2), (64, 4, 2, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(256)](buf5, primals_7, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 16, 1, 1), (16, 1, 1, 1))
buf7 = reinterpret_tensor(buf6, (4, 16, 1, 1), (16, 1, 64, 64), 0)
del buf6
buf13 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 1, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_3[grid(64)](buf7,
primals_9, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_9
buf8 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (4, 16), (16, 1), 0),
reinterpret_tensor(primals_10, (16, 128), (1, 16), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(512)](buf9, primals_11, 512, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_11
buf10 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.mm(buf9, reinterpret_tensor(primals_12, (128, 128),
(1, 128), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_4[grid(512)](buf11, primals_13, 512, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_13
buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_15, buf11, reinterpret_tensor(
primals_14, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf12)
del primals_15
return (buf12, primals_1, primals_3, primals_4, primals_6, primals_8,
buf1, buf3, buf5, reinterpret_tensor(buf7, (4, 16), (16, 1), 0),
buf9, buf11, primals_14, primals_12, primals_10, buf13)
class cnn_7layer_altNew(nn.Module):
def __init__(self, in_ch, in_dim, width=2, linear_size=128):
super(cnn_7layer_altNew, self).__init__()
self.conv1 = nn.Conv2d(in_ch, 4 * width, 3, stride=1, padding=1)
self.conv2 = nn.Conv2d(4 * width, 4 * width, 4, stride=2, padding=1)
self.conv3 = nn.Conv2d(4 * width, 8 * width, 3, stride=1, padding=1)
self.conv4 = nn.Conv2d(8 * width, 8 * width, 4, stride=2, padding=1)
self.fc1 = nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4),
linear_size)
self.fc2 = nn.Linear(linear_size, linear_size)
self.fc3 = nn.Linear(linear_size, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.fc1.weight
primals_11 = self.fc1.bias
primals_12 = self.fc2.weight
primals_13 = self.fc2.bias
primals_14 = self.fc3.weight
primals_15 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
| mnmueller/auto_LiRPA | cnn_7layer_alt | false | 7,278 | [
"BSD-3-Clause"
] | 1 | 55cb270b0b99f07b74541d55706c69fbb9daff66 | https://github.com/mnmueller/auto_LiRPA/tree/55cb270b0b99f07b74541d55706c69fbb9daff66 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_ch, in_dim, width=2, linear_size=128):
super().__init__()
self.conv1 = nn.Conv2d(in_ch, 4 * width, 3, stride=1, padding=1)
self.conv2 = nn.Conv2d(4 * width, 4 * width, 4, stride=2, padding=1)
self.conv3 = nn.Conv2d(4 * width, 8 * width, 3, stride=1, padding=1)
self.conv4 = nn.Conv2d(8 * width, 8 * width, 4, stride=2, padding=1)
self.fc1 = nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4),
linear_size)
self.fc2 = nn.Linear(linear_size, linear_size)
self.fc3 = nn.Linear(linear_size, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
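# Worked sizing note (added): fc1's input width is 8*width*(in_dim//4)**2
# because each stride-2, kernel-4, padding-1 conv halves an even spatial
# side; with width=2 and in_dim=4 that is 16 * 1 * 1 = 16 features.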
|
Inception | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/td/ctdybbibnws4d7ukbk3fpn35zkgapxylowdhzwx7vgsllncbdrxa.py
# Topologically Sorted Source Nodes: [conv2d_1, branch3x3_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# branch3x3_1 => relu_1
# conv2d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
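# Reader note (editorial commentary, not generated by Inductor): this kernel
# fuses the convolution's bias add with ReLU. For a contiguous (4, 4, 4, 4)
# buffer, `x1 = xindex // 16 % 4` recovers each element's channel index, so
# `tmp1` is the per-channel bias; the output `maximum(0, conv + bias)` is
# written back in place through `in_out_ptr0`.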
# kernel path: runs/run_shard_4/inductor_cache/tn/ctnpkytwfzoa42qs7orq2dukxazj6xkswsaijmba7yloy2im5ocs.py
# Topologically Sorted Source Nodes: [max_pool2d, branchM_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu]
# Source node to ATen node mapping:
# branchM_1 => relu_5
# max_pool2d => _low_memory_max_pool2d_with_offsets
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%primals_3, [3, 3], [1, 1], [1, 1], [1, 1], False), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem,), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_relu_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x3 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-5) + x3), tmp10 & xmask, other=float("-inf"))
tmp12 = x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-4) + x3), tmp16 & xmask, other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-3) + x3), tmp23 & xmask, other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-1) + x3), tmp30 & xmask, other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (x3), tmp33 & xmask, other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + x3), tmp36 & xmask, other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + x3), tmp43 & xmask, other=float("-inf"))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + x3), tmp46 & xmask, other=float("-inf"))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + x3), tmp49 & xmask, other=float("-inf"))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tl.full([1], 0, tl.int32)
tmp53 = triton_helpers.maximum(tmp52, tmp51)
tl.store(in_out_ptr0 + (x3), tmp53, xmask)
''', device_str='cuda')
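# Reader note (editorial commentary, not generated by Inductor): this kernel
# fuses a 3x3 / stride-1 / pad-1 max pool with ReLU. The nine `tl.load`s are
# the fully unrolled window; out-of-bounds taps are masked and load
# float('-inf'), so they can never win the running `maximum`, and the final
# `maximum(0, ...)` applies the ReLU in the same pass.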
# kernel path: runs/run_shard_4/inductor_cache/py/cpyuiu2e7w6632is4nthozh6f4rhru6fh5gc65ctjkqznk5vbiro.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %relu_2, %relu_4, %relu_6], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 16
x0 = xindex % 16
x2 = (xindex // 256)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 8, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tmp12 & tmp14
tmp16 = tl.load(in_ptr2 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp15 & xmask, other=0.0)
tmp17 = tl.load(in_ptr3 + ((-4) + x1), tmp15 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp8, tmp18)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp15, tmp19, tmp20)
tmp22 = tmp0 >= tmp13
tmp23 = tl.full([1], 12, tl.int64)
tmp24 = tmp0 < tmp23
tmp25 = tmp22 & tmp24
tmp26 = tl.load(in_ptr4 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp25 & xmask, other=0.0)
tmp27 = tl.load(in_ptr5 + ((-8) + x1), tmp25 & xmask, eviction_policy='evict_last', other=0.0)
tmp28 = tmp26 + tmp27
tmp29 = triton_helpers.maximum(tmp8, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp25, tmp29, tmp30)
tmp32 = tmp0 >= tmp23
tmp33 = tl.full([1], 16, tl.int64)
tmp34 = tmp0 < tmp33
tmp35 = tl.load(in_ptr6 + (x0 + (16*((-12) + x1)) + (64*x2)), tmp32 & xmask, other=0.0)
tmp36 = tl.load(in_ptr7 + ((-12) + x1), tmp32 & xmask, eviction_policy='evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = triton_helpers.maximum(tmp8, tmp37)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp32, tmp38, tmp39)
tmp41 = tl.where(tmp25, tmp31, tmp40)
tmp42 = tl.where(tmp15, tmp21, tmp41)
tmp43 = tl.where(tmp4, tmp11, tmp42)
tl.store(out_ptr0 + (x3), tmp43, xmask)
''', device_str='cuda')
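# Reader note (editorial commentary, not generated by Inductor): this kernel
# materializes torch.cat along dim=1. `x1 = xindex // 16 % 16` is the output
# channel over the 16 concatenated channels; the chained `tl.where` selects
# among the four branch buffers (channels 0-3, 4-7, 8-11, 12-15), and each
# branch's bias add and ReLU are fused into the same copy.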
# kernel path: runs/run_shard_4/inductor_cache/64/c64kbll2vwngzupaesjb3bkeqvvmff5z5c4ptcrwvfprjnzaxdkv.py
# Topologically Sorted Source Nodes: [conv2d_5, branchM_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# branchM_2 => relu_6
# conv2d_5 => convolution_5
# Graph fragment:
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_5, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_6, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
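# Reader note (editorial commentary, not generated by Inductor): this kernel
# computes relu(conv + bias) only to record where the result is <= 0. The
# boolean output feeds aten.threshold_backward, i.e. it is the ReLU mask saved
# for the backward pass rather than a forward activation.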
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4, 5, 5), (100, 25, 5, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, branch3x3_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf2, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(primals_3, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, branch5x5_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf5, primals_9, 256, grid=grid(256), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_10, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [max_pool2d, branchM_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu]
triton_poi_fused_max_pool2d_with_indices_relu_1.run(buf8, primals_3, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 4, 4, 4), (64, 16, 4, 1))
buf10 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf0, primals_2, buf3, primals_7, buf6, primals_11, buf9, primals_13, buf10, 1024, grid=grid(1024), stream=stream0)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_5, branchM_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_3.run(buf9, primals_13, buf11, 256, grid=grid(256), stream=stream0)
del buf9
del primals_13
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_4, branch5x5_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_3.run(buf6, primals_11, buf12, 256, grid=grid(256), stream=stream0)
del buf6
del primals_11
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_2, branch3x3_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_3.run(buf3, primals_7, buf13, 256, grid=grid(256), stream=stream0)
del buf3
del primals_7
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d, branch1x1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_3.run(buf0, primals_2, buf14, 256, grid=grid(256), stream=stream0)
del buf0
del primals_2
return (buf10, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, buf2, buf5, buf8, buf11, buf12, buf13, buf14, )
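    # Reader note (editorial commentary, not generated by Inductor): buf10 is
    # the forward output (the concatenated branches); the remaining entries are
    # weights, input, intermediate activations, and the boolean ReLU masks
    # (buf11-buf14) that the autograd backward graph consumes.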
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 5, 5), (100, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class BasicConv2d(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1,
padding=0, output_relu=True):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=
kernel_size, stride=stride, padding=padding, bias=False)
self.bn = nn.BatchNorm2d(out_planes)
self.relu = nn.ReLU() if output_relu else None
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
if self.relu:
x = self.relu(x)
return x
class Inception(nn.Module):
def __init__(self, channel, batch_norm=False):
super(Inception, self).__init__()
if batch_norm is False:
self.branch1x1 = nn.Conv2d(channel[0], channel[1], kernel_size=
(1, 1), stride=1)
self.branch3x3_1 = nn.Conv2d(channel[0], channel[2],
kernel_size=(1, 1), stride=1)
self.branch3x3_2 = nn.Conv2d(channel[2], channel[3],
kernel_size=(3, 3), stride=1, padding=1)
self.branch5x5_1 = nn.Conv2d(channel[0], channel[4],
kernel_size=(1, 1), stride=1)
self.branch5x5_2 = nn.Conv2d(channel[4], channel[5],
kernel_size=(5, 5), stride=1, padding=2)
self.branchM_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
self.branchM_2 = nn.Conv2d(channel[0], channel[6], kernel_size=
(1, 1), stride=1)
else:
self.branch1x1 = BasicConv2d(channel[0], channel[1],
kernel_size=(1, 1), stride=1)
self.branch3x3_1 = BasicConv2d(channel[0], channel[2],
kernel_size=(1, 1), stride=1)
self.branch3x3_2 = BasicConv2d(channel[2], channel[3],
kernel_size=(3, 3), stride=1, padding=1)
self.branch5x5_1 = BasicConv2d(channel[0], channel[4],
kernel_size=(1, 1), stride=1)
self.branch5x5_2 = BasicConv2d(channel[4], channel[5],
kernel_size=(5, 5), stride=1, padding=2)
self.branchM_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
self.branchM_2 = BasicConv2d(channel[0], channel[6],
kernel_size=(1, 1), stride=1)
self.relu = nn.ReLU(True)
def forward(self, x):
branch1x1 = self.relu(self.branch1x1(x))
branch3x3_1 = self.relu(self.branch3x3_1(x))
branch3x3_2 = self.relu(self.branch3x3_2(branch3x3_1))
branch5x5_1 = self.relu(self.branch5x5_1(x))
branch5x5_2 = self.relu(self.branch5x5_2(branch5x5_1))
branchM_1 = self.relu(self.branchM_1(x))
branchM_2 = self.relu(self.branchM_2(branchM_1))
outputs = [branch1x1, branch3x3_2, branch5x5_2, branchM_2]
return torch.cat(outputs, 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channel': [4, 4, 4, 4, 4, 4, 4]}]
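# Hedged usage sketch (added for illustration; not part of the original repo):
# the harness convention here is [positional_args, kwargs]. With seven
# 4-channel branches configured, the four concatenated outputs give 16 channels.
if __name__ == "__main__":
    args, kwargs = get_init_inputs()
    model = Inception(*args, **kwargs)
    out = model(*get_inputs())
    print(out.shape)  # torch.Size([4, 16, 4, 4])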
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_1(in_out_ptr0, in_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x3 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5 + x3), tmp10 & xmask, other=float('-inf'))
tmp12 = x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4 + x3), tmp16 & xmask, other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3 + x3), tmp23 & xmask, other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + x3), tmp30 & xmask, other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + x3, tmp33 & xmask, other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + x3), tmp36 & xmask, other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + x3), tmp43 & xmask, other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + x3), tmp46 & xmask, other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + x3), tmp49 & xmask, other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tl.full([1], 0, tl.int32)
tmp53 = triton_helpers.maximum(tmp52, tmp51)
tl.store(in_out_ptr0 + x3, tmp53, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 16
x0 = xindex % 16
x2 = xindex // 256
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 8, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tmp12 & tmp14
tmp16 = tl.load(in_ptr2 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp15 &
xmask, other=0.0)
tmp17 = tl.load(in_ptr3 + (-4 + x1), tmp15 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp16 + tmp17
tmp19 = triton_helpers.maximum(tmp8, tmp18)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp15, tmp19, tmp20)
tmp22 = tmp0 >= tmp13
tmp23 = tl.full([1], 12, tl.int64)
tmp24 = tmp0 < tmp23
tmp25 = tmp22 & tmp24
tmp26 = tl.load(in_ptr4 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp25 &
xmask, other=0.0)
tmp27 = tl.load(in_ptr5 + (-8 + x1), tmp25 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp28 = tmp26 + tmp27
tmp29 = triton_helpers.maximum(tmp8, tmp28)
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp25, tmp29, tmp30)
tmp32 = tmp0 >= tmp23
tl.full([1], 16, tl.int64)
tmp35 = tl.load(in_ptr6 + (x0 + 16 * (-12 + x1) + 64 * x2), tmp32 &
xmask, other=0.0)
tmp36 = tl.load(in_ptr7 + (-12 + x1), tmp32 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = triton_helpers.maximum(tmp8, tmp37)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp32, tmp38, tmp39)
tmp41 = tl.where(tmp25, tmp31, tmp40)
tmp42 = tl.where(tmp15, tmp21, tmp41)
tmp43 = tl.where(tmp4, tmp11, tmp42)
tl.store(out_ptr0 + x3, tmp43, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4, 5, 5), (100, 25, 5, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(256)](buf2, primals_5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf3 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = extern_kernels.convolution(primals_3, primals_8, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_0[grid(256)](buf5, primals_9, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf6 = extern_kernels.convolution(buf5, primals_10, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf8 = buf7
del buf7
triton_poi_fused_max_pool2d_with_indices_relu_1[grid(256)](buf8,
primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf9 = extern_kernels.convolution(buf8, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 4, 4, 4), (64, 16, 4, 1))
buf10 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_2[grid(1024)](buf0, primals_2, buf3, primals_7,
buf6, primals_11, buf9, primals_13, buf10, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_3[grid(256)](buf9,
primals_13, buf11, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf9
del primals_13
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_3[grid(256)](buf6,
primals_11, buf12, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf6
del primals_11
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_3[grid(256)](buf3,
primals_7, buf13, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
del primals_7
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_3[grid(256)](buf0,
primals_2, buf14, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
return (buf10, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, buf2, buf5, buf8, buf11, buf12, buf13, buf14)
class BasicConv2d(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1,
padding=0, output_relu=True):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=
kernel_size, stride=stride, padding=padding, bias=False)
self.bn = nn.BatchNorm2d(out_planes)
self.relu = nn.ReLU() if output_relu else None
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
if self.relu:
x = self.relu(x)
return x
class InceptionNew(nn.Module):
def __init__(self, channel, batch_norm=False):
super(InceptionNew, self).__init__()
if batch_norm is False:
self.branch1x1 = nn.Conv2d(channel[0], channel[1], kernel_size=
(1, 1), stride=1)
self.branch3x3_1 = nn.Conv2d(channel[0], channel[2],
kernel_size=(1, 1), stride=1)
self.branch3x3_2 = nn.Conv2d(channel[2], channel[3],
kernel_size=(3, 3), stride=1, padding=1)
self.branch5x5_1 = nn.Conv2d(channel[0], channel[4],
kernel_size=(1, 1), stride=1)
self.branch5x5_2 = nn.Conv2d(channel[4], channel[5],
kernel_size=(5, 5), stride=1, padding=2)
self.branchM_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
self.branchM_2 = nn.Conv2d(channel[0], channel[6], kernel_size=
(1, 1), stride=1)
else:
self.branch1x1 = BasicConv2d(channel[0], channel[1],
kernel_size=(1, 1), stride=1)
self.branch3x3_1 = BasicConv2d(channel[0], channel[2],
kernel_size=(1, 1), stride=1)
self.branch3x3_2 = BasicConv2d(channel[2], channel[3],
kernel_size=(3, 3), stride=1, padding=1)
self.branch5x5_1 = BasicConv2d(channel[0], channel[4],
kernel_size=(1, 1), stride=1)
self.branch5x5_2 = BasicConv2d(channel[4], channel[5],
kernel_size=(5, 5), stride=1, padding=2)
self.branchM_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
self.branchM_2 = BasicConv2d(channel[0], channel[6],
kernel_size=(1, 1), stride=1)
self.relu = nn.ReLU(True)
def forward(self, input_0):
primals_1 = self.branch1x1.weight
primals_2 = self.branch1x1.bias
primals_4 = self.branch3x3_1.weight
primals_5 = self.branch3x3_1.bias
primals_6 = self.branch3x3_2.weight
primals_7 = self.branch3x3_2.bias
primals_8 = self.branch5x5_1.weight
primals_9 = self.branch5x5_1.bias
primals_10 = self.branch5x5_2.weight
primals_11 = self.branch5x5_2.bias
primals_12 = self.branchM_2.weight
primals_13 = self.branchM_2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
| moh2236945/pytorch_classification | Inception | false | 7,279 | [
"MIT"
] | 1 | 8816f08af327e06208b348a78d9c63c133b6a628 | https://github.com/moh2236945/pytorch_classification/tree/8816f08af327e06208b348a78d9c63c133b6a628 | import torch
import torch.nn as nn
class BasicConv2d(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1,
padding=0, output_relu=True):
super().__init__()
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=
kernel_size, stride=stride, padding=padding, bias=False)
self.bn = nn.BatchNorm2d(out_planes)
self.relu = nn.ReLU() if output_relu else None
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
if self.relu:
x = self.relu(x)
return x
class Model(nn.Module):
def __init__(self, channel, batch_norm=False):
super().__init__()
if batch_norm is False:
self.branch1x1 = nn.Conv2d(channel[0], channel[1], kernel_size=
(1, 1), stride=1)
self.branch3x3_1 = nn.Conv2d(channel[0], channel[2],
kernel_size=(1, 1), stride=1)
self.branch3x3_2 = nn.Conv2d(channel[2], channel[3],
kernel_size=(3, 3), stride=1, padding=1)
self.branch5x5_1 = nn.Conv2d(channel[0], channel[4],
kernel_size=(1, 1), stride=1)
self.branch5x5_2 = nn.Conv2d(channel[4], channel[5],
kernel_size=(5, 5), stride=1, padding=2)
self.branchM_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
self.branchM_2 = nn.Conv2d(channel[0], channel[6], kernel_size=
(1, 1), stride=1)
else:
self.branch1x1 = BasicConv2d(channel[0], channel[1],
kernel_size=(1, 1), stride=1)
self.branch3x3_1 = BasicConv2d(channel[0], channel[2],
kernel_size=(1, 1), stride=1)
self.branch3x3_2 = BasicConv2d(channel[2], channel[3],
kernel_size=(3, 3), stride=1, padding=1)
self.branch5x5_1 = BasicConv2d(channel[0], channel[4],
kernel_size=(1, 1), stride=1)
self.branch5x5_2 = BasicConv2d(channel[4], channel[5],
kernel_size=(5, 5), stride=1, padding=2)
self.branchM_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
self.branchM_2 = BasicConv2d(channel[0], channel[6],
kernel_size=(1, 1), stride=1)
self.relu = nn.ReLU(True)
def forward(self, x):
branch1x1 = self.relu(self.branch1x1(x))
branch3x3_1 = self.relu(self.branch3x3_1(x))
branch3x3_2 = self.relu(self.branch3x3_2(branch3x3_1))
branch5x5_1 = self.relu(self.branch5x5_1(x))
branch5x5_2 = self.relu(self.branch5x5_2(branch5x5_1))
branchM_1 = self.relu(self.branchM_1(x))
branchM_2 = self.relu(self.branchM_2(branchM_1))
outputs = [branch1x1, branch3x3_2, branch5x5_2, branchM_2]
return torch.cat(outputs, 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    return [[4, 4, 4, 4, 4, 4, 4]]
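# Hedged usage sketch (added for illustration; not in the original repo):
# exercises the corrected init signature end to end. `channel` is required by
# Model.__init__, so get_init_inputs above now supplies it positionally.
if __name__ == "__main__":
    model = Model(*get_init_inputs())
    out = model(*get_inputs())
    print(out.shape)  # torch.Size([4, 16, 4, 4])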
|
SharedAgent | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
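# Reader note (editorial commentary, not generated by Inductor): this kernel
# applies the linear layer's bias add plus ReLU in place via `in_out_ptr0` and
# simultaneously writes the `<= 0` boolean mask to `out_ptr0`, so the forward
# activation and its threshold_backward mask come from one pass over the data.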
# kernel path: runs/run_shard_4/inductor_cache/jm/cjmjqfjv2ijia2nagoscrnh2gu57uuxti5zfjtxbtxgqzk2qxxoh.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_2 => relu_2
# Graph fragment:
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (8, 4), (4, 1))
assert_size_stride(primals_7, (8, ), (1, ))
assert_size_stride(primals_8, (4, 8), (8, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (1, 8), (8, 1))
assert_size_stride(primals_11, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf11, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf10, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 8), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 8), (128, 32, 8, 1), 0); del buf4 # reuse
buf9 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf5, primals_7, buf9, 512, grid=grid(512), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 8), (8, 1), 0), reinterpret_tensor(primals_8, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf6)
del primals_9
buf8 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [value], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf5, (64, 8), (8, 1), 0), reinterpret_tensor(primals_10, (8, 1), (1, 8), 0), alpha=1, beta=1, out=buf8)
del primals_11
return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf5, (64, 8), (8, 1), 0), primals_10, primals_8, buf9, primals_6, buf10, primals_4, buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class SharedAgent(torch.nn.Module):
"""
A simple two headed / chimera Actor Critic agent.
The actor and critic share the body of the network.
It is argued that this is because "good" actions
correlate to visiting states with "large" values, and
so there should exist some form of shared information
between these two functions, thus motivating the shared
body. However, I haven't seen a rigorous proof of this,
and training an AC model with a shared body usually just
leads to added complications in my experience. If you
know a good reference for a mathematical proof on why
this should be done please let me know!
"""
def __init__(self, numObs, numActions, numHidden):
super(SharedAgent, self).__init__()
self.shared_input = nn.Linear(numObs, numHidden)
self.shared_fc1 = nn.Linear(numHidden, numHidden)
self.shared_fc2 = nn.Linear(numHidden, 2 * numHidden)
self.actor_output = nn.Linear(2 * numHidden, numActions)
self.critic_output = nn.Linear(2 * numHidden, 1)
def forward(self, x):
x = F.relu(self.shared_input(x))
x = F.relu(self.shared_fc1(x))
x = F.relu(self.shared_fc2(x))
logits = self.actor_output(x)
value = self.critic_output(x)
return logits, value
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'numObs': 4, 'numActions': 4, 'numHidden': 4}]
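# Hedged usage sketch (added for illustration; not part of the original repo):
# the harness convention here is [positional_args, kwargs]. Both heads act on
# the last dimension, so logits keep width 4 and the value head emits width 1.
if __name__ == "__main__":
    args, kwargs = get_init_inputs()
    model = SharedAgent(*args, **kwargs)
    logits, value = model(*get_inputs())
    print(logits.shape, value.shape)  # torch.Size([4, 4, 4, 4]) torch.Size([4, 4, 4, 1])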
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (8, 4), (4, 1))
assert_size_stride(primals_7, (8,), (1,))
assert_size_stride(primals_8, (4, 8), (8, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (1, 8), (8, 1))
assert_size_stride(primals_11, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf11, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 8), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 8), (128, 32, 8, 1), 0)
del buf4
buf9 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(512)](buf5,
primals_7, buf9, 512, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 8), (
8, 1), 0), reinterpret_tensor(primals_8, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf6)
del primals_9
buf8 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf5, (64, 8),
(8, 1), 0), reinterpret_tensor(primals_10, (8, 1), (1, 8), 0),
alpha=1, beta=1, out=buf8)
del primals_11
return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf5, (64, 8), (8, 1), 0
), primals_10, primals_8, buf9, primals_6, buf10, primals_4, buf11
class SharedAgentNew(torch.nn.Module):
"""
A simple two headed / chimera Actor Critic agent.
The actor and critic share the body of the network.
It is argued that this is because "good" actions
correlate to visiting states with "large" values, and
so there should exist some form of shared information
between these two functions, thus motivating the shared
body. However, I haven't seen a rigorous proof of this,
and training an AC model with a shared body usually just
leads to added complications in my experience. If you
know a good reference for a mathematical proof on why
this should be done please let me know!
"""
def __init__(self, numObs, numActions, numHidden):
super(SharedAgentNew, self).__init__()
self.shared_input = nn.Linear(numObs, numHidden)
self.shared_fc1 = nn.Linear(numHidden, numHidden)
self.shared_fc2 = nn.Linear(numHidden, 2 * numHidden)
self.actor_output = nn.Linear(2 * numHidden, numActions)
self.critic_output = nn.Linear(2 * numHidden, 1)
def forward(self, input_0):
primals_1 = self.shared_input.weight
primals_2 = self.shared_input.bias
primals_4 = self.shared_fc1.weight
primals_5 = self.shared_fc1.bias
primals_6 = self.shared_fc2.weight
primals_7 = self.shared_fc2.bias
primals_8 = self.actor_output.weight
primals_9 = self.actor_output.bias
primals_10 = self.critic_output.weight
primals_11 = self.critic_output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1]
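# Minimal usage sketch (hypothetical: the compiled call() pins CUDA device 0
# and assumes the 4/4/4 sizes from get_init_inputs):
#   agent = SharedAgentNew(numObs=4, numActions=4, numHidden=4).cuda()
#   logits, value = agent(torch.rand(4, 4, 4, 4, device='cuda'))
#   # logits: (4, 4, 4, 4) action scores; value: (4, 4, 4, 1) state values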
| mpgussert/fundamentalRL | SharedAgent | false | 7,280 | [
"MIT"
] | 1 | 4f45436226e0823c21cac316dec8bbf1df697467 | https://github.com/mpgussert/fundamentalRL/tree/4f45436226e0823c21cac316dec8bbf1df697467 | import torch
import torch.nn.functional as F
import torch.nn as nn
class Model(torch.nn.Module):
"""
    A simple two-headed / chimera Actor-Critic agent.
The actor and critic share the body of the network.
It is argued that this is because "good" actions
correlate to visiting states with "large" values, and
so there should exist some form of shared information
between these two functions, thus motivating the shared
body. However, I haven't seen a rigorous proof of this,
and training an AC model with a shared body usually just
leads to added complications in my experience. If you
know a good reference for a mathematical proof on why
this should be done please let me know!
"""
def __init__(self, numObs, numActions, numHidden):
super().__init__()
self.shared_input = nn.Linear(numObs, numHidden)
self.shared_fc1 = nn.Linear(numHidden, numHidden)
self.shared_fc2 = nn.Linear(numHidden, 2 * numHidden)
self.actor_output = nn.Linear(2 * numHidden, numActions)
self.critic_output = nn.Linear(2 * numHidden, 1)
def forward(self, x):
x = F.relu(self.shared_input(x))
x = F.relu(self.shared_fc1(x))
x = F.relu(self.shared_fc2(x))
logits = self.actor_output(x)
value = self.critic_output(x)
return logits, value
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
BoundNot | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/qs/cqsrkz3gbpplm7lww4odi2i7zyfpi7judf4uepdd45jimhykbtq2.py
# Topologically Sorted Source Nodes: [logical_not], Original ATen: [aten.logical_not]
# Source node to ATen node mapping:
# logical_not => logical_not
# Graph fragment:
# %logical_not : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%arg0_1,), kwargs = {})
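# Note: the input is float32, so logical_not lowers to two elementwise steps
# in the kernel below: (x != 0) casts to bool, then (== 0) inverts it.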
triton_poi_fused_logical_not_0 = async_compile.triton('triton_poi_fused_logical_not_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_logical_not_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_logical_not_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = (tmp0 != 0)
tmp2 = tmp1 == 0
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [logical_not], Original ATen: [aten.logical_not]
stream0 = get_raw_stream(0)
triton_poi_fused_logical_not_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import MSELoss
import json
import logging
logger = logging.getLogger(__name__)  # stand-in: this extract omits the repo's own logger setup
def isnan(x):
if isinstance(x, Patches):
return False
return torch.isnan(x).any()
class Perturbation:
def __init__(self):
pass
def set_eps(self, eps):
self.eps = eps
def concretize(self, x, A, sign=-1, aux=None):
raise NotImplementedError
def init(self, x, aux=None, forward=False):
raise NotImplementedError
class PerturbationL0Norm(Perturbation):
def __init__(self, eps, x_L=None, x_U=None, ratio=1.0):
self.eps = eps
self.x_U = x_U
self.x_L = x_L
self.ratio = ratio
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
eps = math.ceil(self.eps)
x = x.reshape(x.shape[0], -1, 1)
center = A.matmul(x)
x = x.reshape(x.shape[0], 1, -1)
original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2])
neg_mask = A < 0
pos_mask = A >= 0
if sign == 1:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = A[pos_mask] - original[pos_mask]
A_diff[neg_mask] = -original[neg_mask]
else:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = original[pos_mask]
A_diff[neg_mask] = original[neg_mask] - A[neg_mask]
A_diff, _ = torch.sort(A_diff, dim=2, descending=True)
bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2
) * self.ratio
return bound.squeeze(2)
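    # Under an L0 budget of ceil(eps) coordinates (inputs assumed to lie in
    # [0, 1]), A_diff holds each coordinate's worst-case change to A @ x;
    # sorting lets the bound charge only the eps largest changes, scaled by ratio.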
def init(self, x, aux=None, forward=False):
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps)
class PerturbationLpNorm(Perturbation):
def __init__(self, eps, norm=np.inf, x_L=None, x_U=None):
self.eps = eps
self.norm = norm
self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 -
1.0 / self.norm)
self.x_L = x_L
self.x_U = x_U
"""Given an variable x and its bound matrix A, compute worst case bound according to Lp norm."""
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
def concretize_matrix(A):
nonlocal x
if not isinstance(A, eyeC):
A = A.reshape(A.shape[0], A.shape[1], -1)
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
x_ub = x_U.reshape(x_U.shape[0], -1, 1)
x_lb = x_L.reshape(x_L.shape[0], -1, 1)
center = (x_ub + x_lb) / 2.0
diff = (x_ub - x_lb) / 2.0
if not isinstance(A, eyeC):
bound = A.matmul(center) + sign * A.abs().matmul(diff)
else:
bound = center + sign * diff
else:
x = x.reshape(x.shape[0], -1, 1)
if not isinstance(A, eyeC):
deviation = A.norm(self.dual_norm, -1) * self.eps
bound = A.matmul(x) + sign * deviation.unsqueeze(-1)
else:
bound = x + sign * self.eps
bound = bound.squeeze(-1)
return bound
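        # The norm=inf case is Holder's inequality: |A @ (x' - c)| <= |A| @ d
        # elementwise over the box [c - d, c + d]; for finite p, the dual-norm
        # term ||A||_q * eps (with 1/p + 1/q = 1) plays the same role.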
def concretize_patches(A):
nonlocal x
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
center = (x_U + x_L) / 2.0
diff = (x_U - x_L) / 2.0
if not A.identity == 1:
unfold_input = F.unfold(center, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound = prod.view(prod.size(0), prod.size(1), int(math.
sqrt(prod.size(2))), int(math.sqrt(prod.size(2))))
unfold_input = F.unfold(diff, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches.abs()
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound += sign * prod.view(prod.size(0), prod.size(1),
int(math.sqrt(prod.size(2))), int(math.sqrt(prod.
size(2))))
else:
bound = center + sign * diff
return bound
else:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
raise NotImplementedError()
if isinstance(A, eyeC) or isinstance(A, torch.Tensor):
return concretize_matrix(A)
elif isinstance(A, Patches):
return concretize_patches(A)
elif isinstance(A, BoundList):
for b in A.bound_list:
if isinstance(b, eyeC) or isinstance(b, torch.Tensor):
pass
else:
raise NotImplementedError()
def init(self, x, aux=None, forward=False):
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
else:
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
if self.norm == np.inf:
if self.x_L is None and self.x_U is None:
return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps)
else:
return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})'
.format(self.eps, self.x_L, self.x_U))
else:
return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm,
self.eps)
class PerturbationSynonym(Perturbation):
def __init__(self, budget, eps=1.0, use_simple=False):
super(PerturbationSynonym, self).__init__()
self._load_synonyms()
self.budget = budget
self.eps = eps
self.use_simple = use_simple
self.model = None
self.train = False
def __repr__(self):
return (
'perturbation(Synonym-based word substitution budget={}, eps={})'
.format(self.budget, self.eps))
def _load_synonyms(self, path='data/synonyms.json'):
with open(path) as file:
self.synonym = json.loads(file.read())
logger.info('Synonym list loaded for {} words'.format(len(self.
synonym)))
def set_train(self, train):
self.train = train
def concretize(self, x, A, sign, aux):
assert self.model is not None
x_rep, mask, can_be_replaced = aux
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
dim_out = A.shape[1]
max_num_cand = x_rep.shape[2]
mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32,
device=A.device)
num_pos = int(np.max(np.sum(can_be_replaced, axis=-1)))
update_A = A.shape[-1] > num_pos * dim_word
if update_A:
bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape(
batch_size, -1, 1)).squeeze(-1)
else:
bias = 0.0
A = A.reshape(batch_size, dim_out, -1, dim_word)
A_new, x_new, x_rep_new, mask_new = [], [], [], []
zeros_A = torch.zeros(dim_out, dim_word, device=A.device)
zeros_w = torch.zeros(dim_word, device=A.device)
zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device)
zeros_mask = torch.zeros(max_num_cand, device=A.device)
for t in range(batch_size):
cnt = 0
for i in range(0, length):
if can_be_replaced[t][i]:
if update_A:
A_new.append(A[t, :, i, :])
x_new.append(x[t][i])
x_rep_new.append(x_rep[t][i])
mask_new.append(mask[t][i])
cnt += 1
if update_A:
A_new += [zeros_A] * (num_pos - cnt)
x_new += [zeros_w] * (num_pos - cnt)
x_rep_new += [zeros_rep] * (num_pos - cnt)
mask_new += [zeros_mask] * (num_pos - cnt)
if update_A:
A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word
).transpose(1, 2)
x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word)
x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos,
max_num_cand, dim_word)
mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand)
length = num_pos
A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2)
x = x.reshape(batch_size, length, -1, 1)
if sign == 1:
cmp, init = torch.max, -1e+30
else:
cmp, init = torch.min, 1e+30
init_tensor = torch.ones(batch_size, dim_out) * init
dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1)
]
dp[0][0] = torch.zeros(batch_size, dim_out)
A = A.reshape(batch_size * length, A.shape[2], A.shape[3])
Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x.
shape[3])).reshape(batch_size, length, A.shape[1])
Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length,
max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size,
length, A.shape[1], max_num_cand)
Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2)
Ax_rep_bound = cmp(Ax_rep, dim=-1).values
if self.use_simple and self.train:
return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias
for i in range(1, length + 1):
dp[i][0] = dp[i - 1][0] + Ax[:, i - 1]
for j in range(1, self.budget + 1):
dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1
] + Ax_rep_bound[:, i - 1])
dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1,
batch_size, dim_out)
return cmp(dp, dim=0).values + bias
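    # dp[i][j] is the extreme value over the first i words with exactly j
    # substitutions: keep word i (dp[i-1][j] + Ax[:, i-1]) or substitute it
    # (dp[i-1][j-1] + Ax_rep_bound[:, i-1]); the final bound takes the best
    # j <= budget.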
def init(self, x, aux=None, forward=False):
tokens, batch = aux
self.tokens = tokens
assert len(x.shape) == 3
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
max_pos = 1
        can_be_replaced = np.zeros((batch_size, length), dtype=bool)  # np.bool was removed from NumPy
self._build_substitution(batch)
for t in range(batch_size):
cnt = 0
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
for i in range(len(tokens[t])):
if tokens[t][i] == '[UNK]' or len(candidates[i]
) == 0 or tokens[t][i] != candidates[i][0]:
continue
for w in candidates[i][1:]:
if w in self.model.vocab:
can_be_replaced[t][i] = True
cnt += 1
break
max_pos = max(max_pos, cnt)
dim = max_pos * dim_word
if forward:
eye = torch.eye(dim_word)
lw = torch.zeros(batch_size, dim, length, dim_word)
lb = torch.zeros_like(x)
word_embeddings = self.model.word_embeddings.weight
vocab = self.model.vocab
x_rep = [[[] for i in range(length)] for t in range(batch_size)]
max_num_cand = 1
for t in range(batch_size):
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
cnt = 0
for i in range(length):
if can_be_replaced[t][i]:
word_embed = word_embeddings[vocab[tokens[t][i]]]
other_embed = x[t, i] - word_embed
if forward:
lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye
lb[t, i, :] = torch.zeros_like(word_embed)
for w in candidates[i][1:]:
if w in self.model.vocab:
x_rep[t][i].append(word_embeddings[self.model.
vocab[w]] + other_embed)
max_num_cand = max(max_num_cand, len(x_rep[t][i]))
cnt += 1
elif forward:
lb[t, i, :] = x[t, i, :]
if forward:
uw, ub = lw, lb
else:
lw = lb = uw = ub = None
zeros = torch.zeros(dim_word, device=x.device)
x_rep_, mask = [], []
for t in range(batch_size):
for i in range(length):
x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep
[t][i]))
mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len(
x_rep[t][i]))
x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand,
dim_word)
mask = torch.tensor(mask, dtype=torch.float32, device=x.device
).reshape(batch_size, length, max_num_cand)
x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps)
inf = 1e+20
lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * inf, dim=2).values
upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * -inf, dim=2).values
lower = torch.min(lower, x)
upper = torch.max(upper, x)
return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask,
can_be_replaced)
def _build_substitution(self, batch):
for t, example in enumerate(batch):
if 'candidates' not in example or example['candidates'] is None:
candidates = []
tokens = example['sentence'].strip().lower().split(' ')
for i in range(len(tokens)):
_cand = []
if tokens[i] in self.synonym:
for w in self.synonym[tokens[i]]:
if w in self.model.vocab:
_cand.append(w)
if len(_cand) > 0:
_cand = [tokens[i]] + _cand
candidates.append(_cand)
example['candidates'] = candidates
class Interval(tuple):
def __new__(self, lb=None, ub=None, ptb=None):
if ub is None:
assert isinstance(lb, tuple)
lb, ub = lb
return tuple.__new__(Interval, (lb, ub))
def __init__(self, lb, ub, ptb=None):
if ptb is None:
self.ptb = None
assert lb is ub
elif not isinstance(ptb, Perturbation):
raise ValueError(
'ptb must be a Perturbation object or None. Got type {}'.
format(type(ptb)))
else:
self.ptb = ptb
def __str__(self):
return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb)
def __repr__(self):
return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1],
self.ptb)
"""Checking if the other interval is tuple, keep the perturbation."""
@staticmethod
def make_interval(lb, ub, other):
if isinstance(other, Interval):
return Interval(lb, ub, other.ptb)
else:
return lb, ub
"""Given a tuple or Interval object, returns the norm and eps."""
@staticmethod
def get_perturbation(interval):
if isinstance(interval, Interval):
if isinstance(interval.ptb, PerturbationLpNorm):
return interval.ptb.norm, interval.ptb.eps
elif isinstance(interval.ptb, PerturbationSynonym):
return np.inf, 1.0
elif isinstance(interval.ptb, PerturbationL0Norm):
return 0, interval.ptb.eps, interval.ptb.ratio
elif interval.ptb is None:
raise RuntimeError(
'get_perturbation() encountered an interval that is not perturbed.'
)
else:
raise RuntimeError(
'get_perturbation() does not know how to handle {}'.
format(type(interval.ptb)))
else:
return np.inf, np.nan
"""Checking if a Interval or tuple object has perturbation enabled."""
@staticmethod
def is_perturbed(interval):
if isinstance(interval, Interval) and interval.ptb is None:
return False
else:
return True
class Bound(nn.Module):
def __init__(self, input_name, name, ori_name, attr={}, inputs=[],
output_index=0, options={}, device=None):
super().__init__()
self.output_name = []
(self.input_name, self.name, self.ori_name, self.attr, self.inputs,
self.output_index, self.options, self.device) = (input_name,
name, ori_name, attr, inputs, output_index, options, device)
self.fv = None
self.from_input = False
self.bounded = False
self.IBP_rets = None
self.perturbed = False
if options is not None and 'loss_fusion' in options:
self.loss_fusion = options['loss_fusion']
else:
self.loss_fusion = False
"""Check if the i-th input is with perturbation or not."""
def is_input_perturbed(self, i=0):
return self.inputs[i].perturbed
def forward(self, *x):
raise NotImplementedError
def interval_propagate(self, *v):
assert len(v) == 1
h_L, h_U = v[0]
return Interval.make_interval(self.forward(h_L), self.forward(h_U),
v[0])
def bound_forward(self, dim_in, last):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA):
raise NotImplementedError
def infer_batch_dim(self, batch_size, *x):
raise NotImplementedError
def broadcast_backward(self, A, x):
shape = x.default_shape
batch_dim = max(self.batch_dim, 0)
if isinstance(A, torch.Tensor):
if x.batch_dim == -1:
shape = torch.Size([A.shape[batch_dim + 1]] + list(shape))
dims = []
cnt_sum = A.ndim - len(shape) - 1
for i in range(1, A.ndim):
if i != self.batch_dim + 1 and cnt_sum > 0:
dims.append(i)
cnt_sum -= 1
if dims:
A = torch.sum(A, dim=dims)
else:
dims = list(range(1, 1 + A.ndim - 1 - len(shape)))
if dims:
A = torch.sum(A, dim=dims)
dims = []
for i in range(len(shape)):
if shape[i] == 1 and A.shape[i + 1] != 1:
dims.append(i + 1)
if dims:
A = torch.sum(A, dim=dims, keepdim=True)
assert A.shape[1:] == shape
elif type(A) == Patches:
pass
return A
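    # broadcast_backward reverses forward broadcasting: axes that x gained by
    # broadcasting are summed out of A (keepdim for size-1 axes) until
    # A.shape[1:] matches x's original shape again.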
@staticmethod
def broadcast_forward(dim_in, x, shape_res):
lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub
shape_x, shape_res = list(x.lb.shape), list(shape_res)
if lw is None:
lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device)
has_batch_size = False
else:
has_batch_size = True
while len(shape_x) < len(shape_res):
if not has_batch_size:
lw, uw = lw.unsqueeze(0), uw.unsqueeze(0)
lb, ub = lb.unsqueeze(0), ub.unsqueeze(0)
shape_x = [1] + shape_x
has_batch_size = True
else:
lw, uw = lw.unsqueeze(2), uw.unsqueeze(2)
lb, ub = lb.unsqueeze(1), ub.unsqueeze(1)
shape_x = [shape_x[0], 1] + shape_x[1:]
repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))]
lb, ub = lb.repeat(*repeat), ub.repeat(*repeat)
repeat = repeat[:1] + [1] + repeat[1:]
lw, uw = lw.repeat(*repeat), uw.repeat(*repeat)
return lw, lb, uw, ub
def get_bias(self, A, bias):
if A is None:
return 0
assert not isnan(A)
assert not isnan(bias)
if isinstance(A, torch.Tensor):
if torch.norm(A, p=1) < epsilon:
return 0
output_dim = A.shape[0]
if self.batch_dim != -1:
batch_size = A.shape[self.batch_dim + 1]
A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1
]).astype(np.int32), batch_size, np.prod(A.shape[self.
batch_dim + 2:]).astype(np.int32)]
A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size,
output_dim, -1)
bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape(
batch_size, -1, 1)
bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1)
else:
batch_size = A.shape[1]
A = A.view(output_dim, batch_size, -1)
bias_new = A.matmul(bias.view(-1))
if isnan(bias_new):
return 0
else:
return bias_new
elif type(A) == Patches:
if torch.norm(A.patches, p=1) < epsilon:
return 0
if self.batch_dim != -1:
batch_size = bias.shape[0]
bias = F.unfold(bias, kernel_size=A.patches.size(-1),
stride=A.stride, padding=A.padding).transpose(-2, -1
).unsqueeze(-2)
bias.size(1)
patches = A.patches.view(A.patches.size(0), A.patches.size(
1), A.patches.size(-4), A.patches.size(-1) * A.patches.
size(-2) * A.patches.size(-3))
prod = bias * patches
bias_new = prod.sum(-1).transpose(-2, -1)
bias_new = bias_new.view(batch_size, bias_new.size(-2), int
(math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new.
size(-1))))
else:
patches = A.patches
patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias
patches_reshape = patches_reshape.transpose(-1, -2)
return patches_reshape.view(patches_reshape.size(0),
patches_reshape.size(1), int(math.sqrt(patches_reshape.
size(2))), -1).transpose(0, 1)
return bias_new
else:
return NotImplementedError()
class BoundNot(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
def forward(self, x):
return x.logical_not()
def infer_batch_dim(self, batch_size, *x):
return x[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_name': 4, 'name': 4, 'ori_name': 4, 'attr': 4,
'inputs': 4, 'output_index': 4, 'options': _mock_config(loss_fusion
=MSELoss()), 'device': 0}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import json
import logging
logger = logging.getLogger(__name__)  # stand-in: this extract omits the repo's own logger setup
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_logical_not_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 != 0
tmp2 = tmp1 == 0
tl.store(out_ptr0 + x0, tmp2, xmask)
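# Each kernel program covers XBLOCK contiguous indices; launching with
# grid(256) and XBLOCK=128 gives ceil(256 / 128) = 2 programs, and xmask
# guards any tail elements past xnumel.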
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_logical_not_0[grid(256)](arg0_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
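# Hypothetical smoke test for the compiled graph (requires CUDA device 0):
#   x = torch.rand(4, 4, 4, 4, device='cuda')
#   out, = call([x])
#   assert out.dtype == torch.bool and torch.equal(out, x.logical_not())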
def isnan(x):
if isinstance(x, Patches):
return False
return torch.isnan(x).any()
class Perturbation:
def __init__(self):
pass
def set_eps(self, eps):
self.eps = eps
def concretize(self, x, A, sign=-1, aux=None):
raise NotImplementedError
def init(self, x, aux=None, forward=False):
raise NotImplementedError
class PerturbationL0Norm(Perturbation):
def __init__(self, eps, x_L=None, x_U=None, ratio=1.0):
self.eps = eps
self.x_U = x_U
self.x_L = x_L
self.ratio = ratio
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
eps = math.ceil(self.eps)
x = x.reshape(x.shape[0], -1, 1)
center = A.matmul(x)
x = x.reshape(x.shape[0], 1, -1)
original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2])
neg_mask = A < 0
pos_mask = A >= 0
if sign == 1:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = A[pos_mask] - original[pos_mask]
A_diff[neg_mask] = -original[neg_mask]
else:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = original[pos_mask]
A_diff[neg_mask] = original[neg_mask] - A[neg_mask]
A_diff, _ = torch.sort(A_diff, dim=2, descending=True)
bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2
) * self.ratio
return bound.squeeze(2)
def init(self, x, aux=None, forward=False):
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps)
class PerturbationLpNorm(Perturbation):
def __init__(self, eps, norm=np.inf, x_L=None, x_U=None):
self.eps = eps
self.norm = norm
self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 -
1.0 / self.norm)
self.x_L = x_L
self.x_U = x_U
"""Given an variable x and its bound matrix A, compute worst case bound according to Lp norm."""
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
def concretize_matrix(A):
nonlocal x
if not isinstance(A, eyeC):
A = A.reshape(A.shape[0], A.shape[1], -1)
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
x_ub = x_U.reshape(x_U.shape[0], -1, 1)
x_lb = x_L.reshape(x_L.shape[0], -1, 1)
center = (x_ub + x_lb) / 2.0
diff = (x_ub - x_lb) / 2.0
if not isinstance(A, eyeC):
bound = A.matmul(center) + sign * A.abs().matmul(diff)
else:
bound = center + sign * diff
else:
x = x.reshape(x.shape[0], -1, 1)
if not isinstance(A, eyeC):
deviation = A.norm(self.dual_norm, -1) * self.eps
bound = A.matmul(x) + sign * deviation.unsqueeze(-1)
else:
bound = x + sign * self.eps
bound = bound.squeeze(-1)
return bound
def concretize_patches(A):
nonlocal x
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
center = (x_U + x_L) / 2.0
diff = (x_U - x_L) / 2.0
if not A.identity == 1:
unfold_input = F.unfold(center, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound = prod.view(prod.size(0), prod.size(1), int(math.
sqrt(prod.size(2))), int(math.sqrt(prod.size(2))))
unfold_input = F.unfold(diff, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches.abs()
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound += sign * prod.view(prod.size(0), prod.size(1),
int(math.sqrt(prod.size(2))), int(math.sqrt(prod.
size(2))))
else:
bound = center + sign * diff
return bound
else:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
raise NotImplementedError()
if isinstance(A, eyeC) or isinstance(A, torch.Tensor):
return concretize_matrix(A)
elif isinstance(A, Patches):
return concretize_patches(A)
elif isinstance(A, BoundList):
for b in A.bound_list:
if isinstance(b, eyeC) or isinstance(b, torch.Tensor):
pass
else:
raise NotImplementedError()
def init(self, x, aux=None, forward=False):
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
else:
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
if self.norm == np.inf:
if self.x_L is None and self.x_U is None:
return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps)
else:
return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})'
.format(self.eps, self.x_L, self.x_U))
else:
return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm,
self.eps)
class PerturbationSynonym(Perturbation):
def __init__(self, budget, eps=1.0, use_simple=False):
super(PerturbationSynonym, self).__init__()
self._load_synonyms()
self.budget = budget
self.eps = eps
self.use_simple = use_simple
self.model = None
self.train = False
def __repr__(self):
return (
'perturbation(Synonym-based word substitution budget={}, eps={})'
.format(self.budget, self.eps))
def _load_synonyms(self, path='data/synonyms.json'):
with open(path) as file:
self.synonym = json.loads(file.read())
logger.info('Synonym list loaded for {} words'.format(len(self.
synonym)))
def set_train(self, train):
self.train = train
def concretize(self, x, A, sign, aux):
assert self.model is not None
x_rep, mask, can_be_replaced = aux
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
dim_out = A.shape[1]
max_num_cand = x_rep.shape[2]
mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32,
device=A.device)
num_pos = int(np.max(np.sum(can_be_replaced, axis=-1)))
update_A = A.shape[-1] > num_pos * dim_word
if update_A:
bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape(
batch_size, -1, 1)).squeeze(-1)
else:
bias = 0.0
A = A.reshape(batch_size, dim_out, -1, dim_word)
A_new, x_new, x_rep_new, mask_new = [], [], [], []
zeros_A = torch.zeros(dim_out, dim_word, device=A.device)
zeros_w = torch.zeros(dim_word, device=A.device)
zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device)
zeros_mask = torch.zeros(max_num_cand, device=A.device)
for t in range(batch_size):
cnt = 0
for i in range(0, length):
if can_be_replaced[t][i]:
if update_A:
A_new.append(A[t, :, i, :])
x_new.append(x[t][i])
x_rep_new.append(x_rep[t][i])
mask_new.append(mask[t][i])
cnt += 1
if update_A:
A_new += [zeros_A] * (num_pos - cnt)
x_new += [zeros_w] * (num_pos - cnt)
x_rep_new += [zeros_rep] * (num_pos - cnt)
mask_new += [zeros_mask] * (num_pos - cnt)
if update_A:
A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word
).transpose(1, 2)
x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word)
x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos,
max_num_cand, dim_word)
mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand)
length = num_pos
A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2)
x = x.reshape(batch_size, length, -1, 1)
if sign == 1:
cmp, init = torch.max, -1e+30
else:
cmp, init = torch.min, 1e+30
init_tensor = torch.ones(batch_size, dim_out) * init
dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1)
]
dp[0][0] = torch.zeros(batch_size, dim_out)
A = A.reshape(batch_size * length, A.shape[2], A.shape[3])
Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x.
shape[3])).reshape(batch_size, length, A.shape[1])
Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length,
max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size,
length, A.shape[1], max_num_cand)
Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2)
Ax_rep_bound = cmp(Ax_rep, dim=-1).values
if self.use_simple and self.train:
return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias
for i in range(1, length + 1):
dp[i][0] = dp[i - 1][0] + Ax[:, i - 1]
for j in range(1, self.budget + 1):
dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1
] + Ax_rep_bound[:, i - 1])
dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1,
batch_size, dim_out)
return cmp(dp, dim=0).values + bias
def init(self, x, aux=None, forward=False):
tokens, batch = aux
self.tokens = tokens
assert len(x.shape) == 3
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
max_pos = 1
        can_be_replaced = np.zeros((batch_size, length), dtype=bool)  # np.bool was removed from NumPy
self._build_substitution(batch)
for t in range(batch_size):
cnt = 0
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
for i in range(len(tokens[t])):
if tokens[t][i] == '[UNK]' or len(candidates[i]
) == 0 or tokens[t][i] != candidates[i][0]:
continue
for w in candidates[i][1:]:
if w in self.model.vocab:
can_be_replaced[t][i] = True
cnt += 1
break
max_pos = max(max_pos, cnt)
dim = max_pos * dim_word
if forward:
eye = torch.eye(dim_word)
lw = torch.zeros(batch_size, dim, length, dim_word)
lb = torch.zeros_like(x)
word_embeddings = self.model.word_embeddings.weight
vocab = self.model.vocab
x_rep = [[[] for i in range(length)] for t in range(batch_size)]
max_num_cand = 1
for t in range(batch_size):
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
cnt = 0
for i in range(length):
if can_be_replaced[t][i]:
word_embed = word_embeddings[vocab[tokens[t][i]]]
other_embed = x[t, i] - word_embed
if forward:
lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye
lb[t, i, :] = torch.zeros_like(word_embed)
for w in candidates[i][1:]:
if w in self.model.vocab:
x_rep[t][i].append(word_embeddings[self.model.
vocab[w]] + other_embed)
max_num_cand = max(max_num_cand, len(x_rep[t][i]))
cnt += 1
elif forward:
lb[t, i, :] = x[t, i, :]
if forward:
uw, ub = lw, lb
else:
lw = lb = uw = ub = None
zeros = torch.zeros(dim_word, device=x.device)
x_rep_, mask = [], []
for t in range(batch_size):
for i in range(length):
x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep
[t][i]))
mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len(
x_rep[t][i]))
x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand,
dim_word)
mask = torch.tensor(mask, dtype=torch.float32, device=x.device
).reshape(batch_size, length, max_num_cand)
x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps)
inf = 1e+20
lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * inf, dim=2).values
upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * -inf, dim=2).values
lower = torch.min(lower, x)
upper = torch.max(upper, x)
return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask,
can_be_replaced)
def _build_substitution(self, batch):
for t, example in enumerate(batch):
if 'candidates' not in example or example['candidates'] is None:
candidates = []
tokens = example['sentence'].strip().lower().split(' ')
for i in range(len(tokens)):
_cand = []
if tokens[i] in self.synonym:
for w in self.synonym[tokens[i]]:
if w in self.model.vocab:
_cand.append(w)
if len(_cand) > 0:
_cand = [tokens[i]] + _cand
candidates.append(_cand)
example['candidates'] = candidates
class Interval(tuple):
def __new__(self, lb=None, ub=None, ptb=None):
if ub is None:
assert isinstance(lb, tuple)
lb, ub = lb
return tuple.__new__(Interval, (lb, ub))
def __init__(self, lb, ub, ptb=None):
if ptb is None:
self.ptb = None
assert lb is ub
elif not isinstance(ptb, Perturbation):
raise ValueError(
'ptb must be a Perturbation object or None. Got type {}'.
format(type(ptb)))
else:
self.ptb = ptb
def __str__(self):
return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb)
def __repr__(self):
return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1],
self.ptb)
"""Checking if the other interval is tuple, keep the perturbation."""
@staticmethod
def make_interval(lb, ub, other):
if isinstance(other, Interval):
return Interval(lb, ub, other.ptb)
else:
return lb, ub
"""Given a tuple or Interval object, returns the norm and eps."""
@staticmethod
def get_perturbation(interval):
if isinstance(interval, Interval):
if isinstance(interval.ptb, PerturbationLpNorm):
return interval.ptb.norm, interval.ptb.eps
elif isinstance(interval.ptb, PerturbationSynonym):
return np.inf, 1.0
elif isinstance(interval.ptb, PerturbationL0Norm):
return 0, interval.ptb.eps, interval.ptb.ratio
elif interval.ptb is None:
raise RuntimeError(
'get_perturbation() encountered an interval that is not perturbed.'
)
else:
raise RuntimeError(
'get_perturbation() does not know how to handle {}'.
format(type(interval.ptb)))
else:
return np.inf, np.nan
"""Checking if a Interval or tuple object has perturbation enabled."""
@staticmethod
def is_perturbed(interval):
if isinstance(interval, Interval) and interval.ptb is None:
return False
else:
return True
class Bound(nn.Module):
def __init__(self, input_name, name, ori_name, attr={}, inputs=[],
output_index=0, options={}, device=None):
super().__init__()
self.output_name = []
(self.input_name, self.name, self.ori_name, self.attr, self.inputs,
self.output_index, self.options, self.device) = (input_name,
name, ori_name, attr, inputs, output_index, options, device)
self.fv = None
self.from_input = False
self.bounded = False
self.IBP_rets = None
self.perturbed = False
if options is not None and 'loss_fusion' in options:
self.loss_fusion = options['loss_fusion']
else:
self.loss_fusion = False
"""Check if the i-th input is with perturbation or not."""
def is_input_perturbed(self, i=0):
return self.inputs[i].perturbed
def forward(self, *x):
raise NotImplementedError
def interval_propagate(self, *v):
assert len(v) == 1
h_L, h_U = v[0]
return Interval.make_interval(self.forward(h_L), self.forward(h_U),
v[0])
def bound_forward(self, dim_in, last):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA):
raise NotImplementedError
def infer_batch_dim(self, batch_size, *x):
raise NotImplementedError
def broadcast_backward(self, A, x):
shape = x.default_shape
batch_dim = max(self.batch_dim, 0)
if isinstance(A, torch.Tensor):
if x.batch_dim == -1:
shape = torch.Size([A.shape[batch_dim + 1]] + list(shape))
dims = []
cnt_sum = A.ndim - len(shape) - 1
for i in range(1, A.ndim):
if i != self.batch_dim + 1 and cnt_sum > 0:
dims.append(i)
cnt_sum -= 1
if dims:
A = torch.sum(A, dim=dims)
else:
dims = list(range(1, 1 + A.ndim - 1 - len(shape)))
if dims:
A = torch.sum(A, dim=dims)
dims = []
for i in range(len(shape)):
if shape[i] == 1 and A.shape[i + 1] != 1:
dims.append(i + 1)
if dims:
A = torch.sum(A, dim=dims, keepdim=True)
assert A.shape[1:] == shape
elif type(A) == Patches:
pass
return A
@staticmethod
def broadcast_forward(dim_in, x, shape_res):
lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub
shape_x, shape_res = list(x.lb.shape), list(shape_res)
if lw is None:
lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device)
has_batch_size = False
else:
has_batch_size = True
while len(shape_x) < len(shape_res):
if not has_batch_size:
lw, uw = lw.unsqueeze(0), uw.unsqueeze(0)
lb, ub = lb.unsqueeze(0), ub.unsqueeze(0)
shape_x = [1] + shape_x
has_batch_size = True
else:
lw, uw = lw.unsqueeze(2), uw.unsqueeze(2)
lb, ub = lb.unsqueeze(1), ub.unsqueeze(1)
shape_x = [shape_x[0], 1] + shape_x[1:]
repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))]
lb, ub = lb.repeat(*repeat), ub.repeat(*repeat)
repeat = repeat[:1] + [1] + repeat[1:]
lw, uw = lw.repeat(*repeat), uw.repeat(*repeat)
return lw, lb, uw, ub
def get_bias(self, A, bias):
if A is None:
return 0
assert not isnan(A)
assert not isnan(bias)
if isinstance(A, torch.Tensor):
if torch.norm(A, p=1) < epsilon:
return 0
output_dim = A.shape[0]
if self.batch_dim != -1:
batch_size = A.shape[self.batch_dim + 1]
A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1
]).astype(np.int32), batch_size, np.prod(A.shape[self.
batch_dim + 2:]).astype(np.int32)]
A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size,
output_dim, -1)
bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape(
batch_size, -1, 1)
bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1)
else:
batch_size = A.shape[1]
A = A.view(output_dim, batch_size, -1)
bias_new = A.matmul(bias.view(-1))
if isnan(bias_new):
return 0
else:
return bias_new
elif type(A) == Patches:
if torch.norm(A.patches, p=1) < epsilon:
return 0
if self.batch_dim != -1:
batch_size = bias.shape[0]
bias = F.unfold(bias, kernel_size=A.patches.size(-1),
stride=A.stride, padding=A.padding).transpose(-2, -1
).unsqueeze(-2)
bias.size(1)
patches = A.patches.view(A.patches.size(0), A.patches.size(
1), A.patches.size(-4), A.patches.size(-1) * A.patches.
size(-2) * A.patches.size(-3))
prod = bias * patches
bias_new = prod.sum(-1).transpose(-2, -1)
bias_new = bias_new.view(batch_size, bias_new.size(-2), int
(math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new.
size(-1))))
else:
patches = A.patches
patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias
patches_reshape = patches_reshape.transpose(-1, -2)
return patches_reshape.view(patches_reshape.size(0),
patches_reshape.size(1), int(math.sqrt(patches_reshape.
size(2))), -1).transpose(0, 1)
return bias_new
else:
return NotImplementedError()
class BoundNotNew(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
def infer_batch_dim(self, batch_size, *x):
return x[0]
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| mnmueller/auto_LiRPA | BoundNot | false | 7,281 | [
"BSD-3-Clause"
] | 1 | 55cb270b0b99f07b74541d55706c69fbb9daff66 | https://github.com/mnmueller/auto_LiRPA/tree/55cb270b0b99f07b74541d55706c69fbb9daff66 | from _paritybench_helpers import _mock_config
import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import MSELoss
def isnan(x):
if isinstance(x, Patches):
return False
return torch.isnan(x).any()
class Perturbation:
def __init__(self):
pass
def set_eps(self, eps):
self.eps = eps
def concretize(self, x, A, sign=-1, aux=None):
raise NotImplementedError
def init(self, x, aux=None, forward=False):
raise NotImplementedError
class PerturbationL0Norm(Perturbation):
def __init__(self, eps, x_L=None, x_U=None, ratio=1.0):
self.eps = eps
self.x_U = x_U
self.x_L = x_L
self.ratio = ratio
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
eps = math.ceil(self.eps)
x = x.reshape(x.shape[0], -1, 1)
center = A.matmul(x)
x = x.reshape(x.shape[0], 1, -1)
original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2])
neg_mask = A < 0
pos_mask = A >= 0
if sign == 1:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = A[pos_mask] - original[pos_mask]
A_diff[neg_mask] = -original[neg_mask]
else:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = original[pos_mask]
A_diff[neg_mask] = original[neg_mask] - A[neg_mask]
A_diff, _ = torch.sort(A_diff, dim=2, descending=True)
bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2
) * self.ratio
return bound.squeeze(2)
def init(self, x, aux=None, forward=False):
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps)
class PerturbationLpNorm(Perturbation):
def __init__(self, eps, norm=np.inf, x_L=None, x_U=None):
self.eps = eps
self.norm = norm
self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 -
1.0 / self.norm)
self.x_L = x_L
self.x_U = x_U
"""Given an variable x and its bound matrix A, compute worst case bound according to Lp norm."""
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
def concretize_matrix(A):
nonlocal x
if not isinstance(A, eyeC):
A = A.reshape(A.shape[0], A.shape[1], -1)
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
x_ub = x_U.reshape(x_U.shape[0], -1, 1)
x_lb = x_L.reshape(x_L.shape[0], -1, 1)
center = (x_ub + x_lb) / 2.0
diff = (x_ub - x_lb) / 2.0
if not isinstance(A, eyeC):
bound = A.matmul(center) + sign * A.abs().matmul(diff)
else:
bound = center + sign * diff
else:
x = x.reshape(x.shape[0], -1, 1)
if not isinstance(A, eyeC):
deviation = A.norm(self.dual_norm, -1) * self.eps
bound = A.matmul(x) + sign * deviation.unsqueeze(-1)
else:
bound = x + sign * self.eps
bound = bound.squeeze(-1)
return bound
def concretize_patches(A):
nonlocal x
if self.norm == np.inf:
x_L = x -
# ... truncated (>4000 chars) for memory efficiency |
Net5 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ix/cixxyusyg44s2hkoufcgbrv3ix5ookwqjl4ia3xkv7bdqi4yrzus.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_4 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = (xindex // 1600)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask)
''', device_str='cuda')
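# The kernel above fuses the first Linear's bias add and ReLU while recording
# the (activation <= 0) mask that ReLU's backward needs; the bool mask is
# written with a padded stride (1664 rather than 1600), presumably so the
# buffer stays aligned.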
# kernel path: runs/run_shard_4/inductor_cache/op/coptu6xep3awc4lajb4xivopppqmjtx3zy7ebtazm45rqvyeknds.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_3 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = (xindex // 1200)
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + (1216*x2)), tmp4, xmask)
tl.store(out_ptr1 + (x3 + (1280*x2)), tmp6, xmask)
''', device_str='cuda')
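# Same bias + ReLU + backward-mask fusion for the second Linear, but both
# outputs land in padded buffers (strides 1216 and 1280 over 1200 logical
# elements); the copy kernel below repacks the activation contiguously.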
# kernel path: runs/run_shard_4/inductor_cache/as/casrc7bf7ghsendgi7tkqxk3hj4ic6aqb4rmkxzuk5dhbidznia7.py
# Topologically Sorted Source Nodes: [out_3, out_5], Original ATen: [aten.relu, aten.view]
# Source node to ATen node mapping:
# out_3 => relu_1
# out_5 => view_4
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %view_4 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%relu_1, [64, 300]), kwargs = {})
triton_poi_fused_relu_view_2 = async_compile.triton('triton_poi_fused_relu_view_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_view_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = (xindex // 300)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (300*(x1 % 4)) + (1216*(x1 // 4))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
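# Pure data-movement kernel: it gathers the ReLU output out of the padded
# stride-1216 layout into a contiguous (64, 300) matrix so the following
# matmul sees a plain row-major operand.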
# kernel path: runs/run_shard_4/inductor_cache/dz/cdzzjqufxgjdtwmtqoqggqn2ny2ysfyvvnngvb35noosm27wiln3.py
# Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_6 => relu_2
# Graph fragment:
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ep/cepy3a3v2ftjseqnazzpg6ymclul67kiqspcli35c422aj3rouiq.py
# Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_9 => relu_3
# Graph fragment:
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_7,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_relu_threshold_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 100
x2 = xindex % 1600
x3 = (xindex // 1600)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ix/cix6ohige22nx5mqvwy7agh5yjldprz3tavakjwe7i3isipk53ov.py
# Topologically Sorted Source Nodes: [out_12], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_12 => relu_4
# Graph fragment:
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_9,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_5 = async_compile.triton('triton_poi_fused_relu_threshold_backward_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3840
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 60
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (400, 4), (4, 1))
assert_size_stride(primals_2, (400, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (300, 400), (400, 1))
assert_size_stride(primals_5, (300, ), (1, ))
assert_size_stride(primals_6, (200, 300), (300, 1))
assert_size_stride(primals_7, (200, ), (1, ))
assert_size_stride(primals_8, (100, 200), (200, 1))
assert_size_stride(primals_9, (100, ), (1, ))
assert_size_stride(primals_10, (60, 100), (100, 1))
assert_size_stride(primals_11, (60, ), (1, ))
assert_size_stride(primals_12, (1, 60), (60, 1))
assert_size_stride(primals_13, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0); del buf0 # reuse
buf17 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf17, 25600, grid=grid(25600), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0), reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1), torch.float32)
buf16 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_5, buf3, buf16, 19200, grid=grid(19200), stream=stream0)
del primals_5
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [out_3, out_5], Original ATen: [aten.relu, aten.view]
triton_poi_fused_relu_view_2.run(buf3, buf4, 19200, grid=grid(19200), stream=stream0)
del buf3
buf5 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (300, 200), (1, 300), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 200), (3200, 800, 200, 1), 0); del buf5 # reuse
buf15 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_3.run(buf6, primals_7, buf15, 12800, grid=grid(12800), stream=stream0)
del primals_7
buf7 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf6, (64, 200), (200, 1), 0), reinterpret_tensor(primals_8, (200, 100), (1, 200), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 100), (1600, 400, 100, 1), 0); del buf7 # reuse
buf14 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_4.run(buf8, primals_9, buf14, 6400, grid=grid(6400), stream=stream0)
del primals_9
buf9 = empty_strided_cuda((64, 60), (60, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf8, (64, 100), (100, 1), 0), reinterpret_tensor(primals_10, (100, 60), (1, 100), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4, 60), (960, 240, 60, 1), 0); del buf9 # reuse
buf13 = empty_strided_cuda((4, 4, 4, 60), (960, 240, 60, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_12], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_5.run(buf10, primals_11, buf13, 3840, grid=grid(3840), stream=stream0)
del primals_11
buf12 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_13], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, reinterpret_tensor(buf10, (64, 60), (60, 1), 0), reinterpret_tensor(primals_12, (60, 1), (1, 60), 0), alpha=1, beta=1, out=buf12)
del primals_13
return (reinterpret_tensor(buf12, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 400), (400, 1), 0), buf4, reinterpret_tensor(buf6, (64, 200), (200, 1), 0), reinterpret_tensor(buf8, (64, 100), (100, 1), 0), reinterpret_tensor(buf10, (64, 60), (60, 1), 0), primals_12, buf13, primals_10, buf14, primals_8, buf15, primals_6, buf16, primals_4, buf17, )
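# Output contract sketch (editor's note): the first returned tensor is the
# network output re-viewed as (4, 4, 4, 1); the rest (flattened activations,
# weight matrices, and boolean ReLU masks) are saved for the backward graph.
# reinterpret_tensor is a zero-copy re-view given explicit (shape, strides,
# offset), and extern_kernels.addmm above computes the final linear layer as
#
#     out = beta * bias + alpha * (input @ weight_t)   # alpha = beta = 1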
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((400, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((300, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((200, 300), (300, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((200, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((100, 200), (200, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((60, 100), (100, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((60, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((1, 60), (60, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
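# Benchmark sketch (editor's note; rand_strided is assumed to allocate tensors
# with exactly the shapes/strides asserted in call(), filled with random
# values), e.g.
#
#     benchmark_compiled_module(times=100, repeat=20)  # prints measured runtime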
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn.init import kaiming_normal
from torch.nn.init import normal
def weights_init(m):
    if isinstance(m, (nn.Conv1d, nn.Linear)):
        kaiming_normal(m.weight.data)
        try:
            # kaiming_normal needs a tensor with at least 2 dimensions and
            # raises ValueError for a 1-D bias vector.
            kaiming_normal(m.bias.data)
        except ValueError:
            # Fall back to a plain normal initialization for biases.
            normal(m.bias.data)
class Net5(nn.Module):
"""
Net5 is a neural network consisting of five hidden layers with sizes 400,
    300, 200, 100 and 60.
    Furthermore, there are three dropout layers.
"""
hidden1 = 400
hidden2 = 300
hidden3 = 200
hidden4 = 100
hidden5 = 60
def __init__(self, input_size):
super(Net5, self).__init__()
self.fc1 = nn.Linear(input_size, self.hidden1)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(self.hidden1, self.hidden2)
self.relu2 = nn.ReLU()
self.drop1 = nn.Dropout(0.2)
self.fc3 = nn.Linear(self.hidden2, self.hidden3)
self.relu3 = nn.ReLU()
self.drop2 = nn.Dropout(0.15)
self.fc4 = nn.Linear(self.hidden3, self.hidden4)
self.relu4 = nn.ReLU()
self.drop3 = nn.Dropout(0.15)
self.fc5 = nn.Linear(self.hidden4, self.hidden5)
self.relu5 = nn.ReLU()
self.fc6 = nn.Linear(self.hidden5, 1)
self.apply(weights_init)
def forward(self, x):
out = self.fc1(x)
out = self.relu1(out)
out = self.fc2(out)
out = self.relu2(out)
out = self.drop1(out)
out = self.fc3(out)
out = self.relu3(out)
out = self.drop2(out)
out = self.fc4(out)
out = self.relu4(out)
out = self.drop3(out)
out = self.fc5(out)
out = self.relu5(out)
out = self.fc6(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4}]
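# Usage sketch (editor's addition, relying only on the helpers above):
#
#     args, kwargs = get_init_inputs()
#     net = Net5(*args, **kwargs)
#     net.eval()  # make the three dropout layers identity for a stable pass
#     out = net(*get_inputs())
#     print(out.shape)  # torch.Size([4, 4, 4, 1])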
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
from torch.nn.init import kaiming_normal
from torch.nn.init import normal
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = xindex // 1200
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = xindex // 300
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 100
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 3840
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 60
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
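# Launch-convention sketch (editor's note, standard Triton/Inductor semantics):
# for a 1-D pointwise kernel, grid(n) launches ceil(n / XBLOCK) programs and
# xmask disables the out-of-range lanes of the last program, e.g.
#
#     triton_poi_fused_relu_threshold_backward_5[grid(3840)](
#         buf10, primals_11, buf13, 3840, XBLOCK=256)  # 15 programs x 256 lanes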
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (400, 4), (4, 1))
assert_size_stride(primals_2, (400,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (300, 400), (400, 1))
assert_size_stride(primals_5, (300,), (1,))
assert_size_stride(primals_6, (200, 300), (300, 1))
assert_size_stride(primals_7, (200,), (1,))
assert_size_stride(primals_8, (100, 200), (200, 1))
assert_size_stride(primals_9, (100,), (1,))
assert_size_stride(primals_10, (60, 100), (100, 1))
assert_size_stride(primals_11, (60,), (1,))
assert_size_stride(primals_12, (1, 60), (60, 1))
assert_size_stride(primals_13, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf0
buf17 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(25600)](buf1,
primals_2, buf17, 25600, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1),
torch.float32)
buf16 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(19200)](buf2,
primals_5, buf3, buf16, 19200, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_5
buf4 = buf2
del buf2
triton_poi_fused_relu_view_2[grid(19200)](buf3, buf4, 19200, XBLOCK
=128, num_warps=4, num_stages=1)
del buf3
buf5 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (300, 200), (
1, 300), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 200), (3200, 800, 200, 1), 0)
del buf5
buf15 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_3[grid(12800)](buf6,
primals_7, buf15, 12800, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf7 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf6, (64, 200), (200, 1), 0),
reinterpret_tensor(primals_8, (200, 100), (1, 200), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 100), (1600, 400, 100, 1), 0)
del buf7
buf14 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_4[grid(6400)](buf8,
primals_9, buf14, 6400, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf9 = empty_strided_cuda((64, 60), (60, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (64, 100), (100, 1), 0),
reinterpret_tensor(primals_10, (100, 60), (1, 100), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4, 60), (960, 240, 60, 1), 0)
del buf9
buf13 = empty_strided_cuda((4, 4, 4, 60), (960, 240, 60, 1), torch.bool
)
triton_poi_fused_relu_threshold_backward_5[grid(3840)](buf10,
primals_11, buf13, 3840, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf12 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf10, (64, 60),
(60, 1), 0), reinterpret_tensor(primals_12, (60, 1), (1, 60), 0
), alpha=1, beta=1, out=buf12)
del primals_13
return (reinterpret_tensor(buf12, (4, 4, 4, 1), (16, 4, 1, 1), 0),
reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (64, 400), (400, 1), 0), buf4,
reinterpret_tensor(buf6, (64, 200), (200, 1), 0),
reinterpret_tensor(buf8, (64, 100), (100, 1), 0),
reinterpret_tensor(buf10, (64, 60), (60, 1), 0), primals_12, buf13,
primals_10, buf14, primals_8, buf15, primals_6, buf16, primals_4, buf17
)
def weights_init(m):
if isinstance(m, (nn.Conv1d, nn.Linear)):
kaiming_normal(m.weight.data)
try:
kaiming_normal(m.bias.data)
except ValueError:
normal(m.bias.data)
class Net5New(nn.Module):
"""
Net5 is a neural network consisting of five hidden layers with sizes 400,
    300, 200, 100 and 60.
    Furthermore, there are three dropout layers.
"""
hidden1 = 400
hidden2 = 300
hidden3 = 200
hidden4 = 100
hidden5 = 60
def __init__(self, input_size):
super(Net5New, self).__init__()
self.fc1 = nn.Linear(input_size, self.hidden1)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(self.hidden1, self.hidden2)
self.relu2 = nn.ReLU()
self.drop1 = nn.Dropout(0.2)
self.fc3 = nn.Linear(self.hidden2, self.hidden3)
self.relu3 = nn.ReLU()
self.drop2 = nn.Dropout(0.15)
self.fc4 = nn.Linear(self.hidden3, self.hidden4)
self.relu4 = nn.ReLU()
self.drop3 = nn.Dropout(0.15)
self.fc5 = nn.Linear(self.hidden4, self.hidden5)
self.relu5 = nn.ReLU()
self.fc6 = nn.Linear(self.hidden5, 1)
self.apply(weights_init)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_10 = self.fc5.weight
primals_11 = self.fc5.bias
primals_12 = self.fc6.weight
primals_13 = self.fc6.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
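# Equivalence sketch (editor's note): Net5New always runs the inference graph
# (no dropout), so with shared weights it should match the eager Net5 defined
# earlier in eval mode:
#
#     eager = Net5(4).eval().cuda()
#     compiled = Net5New(4).cuda()
#     compiled.load_state_dict(eager.state_dict())
#     x = torch.rand([4, 4, 4, 4], device='cuda')
#     torch.testing.assert_close(compiled(x), eager(x))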
| moritzschaefer/pavooc | Net5 | false | 7,282 | ["MIT"] | 1 | 735f5455f9a95a5734436a24e2aa92cf600c91af | https://github.com/moritzschaefer/pavooc/tree/735f5455f9a95a5734436a24e2aa92cf600c91af | import torch
from torch import nn
from torch.nn.init import kaiming_normal
from torch.nn.init import normal
def weights_init(m):
if isinstance(m, (nn.Conv1d, nn.Linear)):
kaiming_normal(m.weight.data)
try:
kaiming_normal(m.bias.data)
except ValueError:
normal(m.bias.data)
class Model(nn.Module):
"""
Net5 is a neural network consisting of five hidden layers with sizes 400,
    300, 200, 100 and 60.
    Furthermore, there are three dropout layers.
"""
hidden1 = 400
hidden2 = 300
hidden3 = 200
hidden4 = 100
hidden5 = 60
def __init__(self, input_size):
super().__init__()
self.fc1 = nn.Linear(input_size, self.hidden1)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(self.hidden1, self.hidden2)
self.relu2 = nn.ReLU()
self.drop1 = nn.Dropout(0.2)
self.fc3 = nn.Linear(self.hidden2, self.hidden3)
self.relu3 = nn.ReLU()
self.drop2 = nn.Dropout(0.15)
self.fc4 = nn.Linear(self.hidden3, self.hidden4)
self.relu4 = nn.ReLU()
self.drop3 = nn.Dropout(0.15)
self.fc5 = nn.Linear(self.hidden4, self.hidden5)
self.relu5 = nn.ReLU()
self.fc6 = nn.Linear(self.hidden5, 1)
self.apply(weights_init)
def forward(self, x):
out = self.fc1(x)
out = self.relu1(out)
out = self.fc2(out)
out = self.relu2(out)
out = self.drop1(out)
out = self.fc3(out)
out = self.relu3(out)
out = self.drop2(out)
out = self.fc4(out)
out = self.relu4(out)
out = self.drop3(out)
out = self.fc5(out)
out = self.relu5(out)
out = self.fc6(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Net4 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ix/cixxyusyg44s2hkoufcgbrv3ix5ookwqjl4ia3xkv7bdqi4yrzus.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_4 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = (xindex // 1600)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/op/coptu6xep3awc4lajb4xivopppqmjtx3zy7ebtazm45rqvyeknds.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_3 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = (xindex // 1200)
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + (1216*x2)), tmp4, xmask)
tl.store(out_ptr1 + (x3 + (1280*x2)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/as/casrc7bf7ghsendgi7tkqxk3hj4ic6aqb4rmkxzuk5dhbidznia7.py
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.relu, aten.view]
# Source node to ATen node mapping:
# out_3 => relu_1
# out_4 => view_4
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %view_4 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%relu_1, [64, 300]), kwargs = {})
triton_poi_fused_relu_view_2 = async_compile.triton('triton_poi_fused_relu_view_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_view_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = (xindex // 300)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (300*(x1 % 4)) + (1216*(x1 // 4))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dz/cdzzjqufxgjdtwmtqoqggqn2ny2ysfyvvnngvb35noosm27wiln3.py
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_5 => relu_2
# Graph fragment:
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ep/cepy3a3v2ftjseqnazzpg6ymclul67kiqspcli35c422aj3rouiq.py
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_7 => relu_3
# Graph fragment:
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_7,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_relu_threshold_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 100
x2 = xindex % 1600
x3 = (xindex // 1600)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ix/cix6ohige22nx5mqvwy7agh5yjldprz3tavakjwe7i3isipk53ov.py
# Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_9 => relu_4
# Graph fragment:
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_9,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_5 = async_compile.triton('triton_poi_fused_relu_threshold_backward_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3840
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 60
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (400, 4), (4, 1))
assert_size_stride(primals_2, (400, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (300, 400), (400, 1))
assert_size_stride(primals_5, (300, ), (1, ))
assert_size_stride(primals_6, (200, 300), (300, 1))
assert_size_stride(primals_7, (200, ), (1, ))
assert_size_stride(primals_8, (100, 200), (200, 1))
assert_size_stride(primals_9, (100, ), (1, ))
assert_size_stride(primals_10, (60, 100), (100, 1))
assert_size_stride(primals_11, (60, ), (1, ))
assert_size_stride(primals_12, (1, 60), (60, 1))
assert_size_stride(primals_13, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0); del buf0 # reuse
buf17 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf17, 25600, grid=grid(25600), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0), reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1), torch.float32)
buf16 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_5, buf3, buf16, 19200, grid=grid(19200), stream=stream0)
del primals_5
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.relu, aten.view]
triton_poi_fused_relu_view_2.run(buf3, buf4, 19200, grid=grid(19200), stream=stream0)
del buf3
buf5 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (300, 200), (1, 300), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 200), (3200, 800, 200, 1), 0); del buf5 # reuse
buf15 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_3.run(buf6, primals_7, buf15, 12800, grid=grid(12800), stream=stream0)
del primals_7
buf7 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf6, (64, 200), (200, 1), 0), reinterpret_tensor(primals_8, (200, 100), (1, 200), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 100), (1600, 400, 100, 1), 0); del buf7 # reuse
buf14 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_4.run(buf8, primals_9, buf14, 6400, grid=grid(6400), stream=stream0)
del primals_9
buf9 = empty_strided_cuda((64, 60), (60, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf8, (64, 100), (100, 1), 0), reinterpret_tensor(primals_10, (100, 60), (1, 100), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4, 60), (960, 240, 60, 1), 0); del buf9 # reuse
buf13 = empty_strided_cuda((4, 4, 4, 60), (960, 240, 60, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_5.run(buf10, primals_11, buf13, 3840, grid=grid(3840), stream=stream0)
del primals_11
buf12 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_10], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, reinterpret_tensor(buf10, (64, 60), (60, 1), 0), reinterpret_tensor(primals_12, (60, 1), (1, 60), 0), alpha=1, beta=1, out=buf12)
del primals_13
return (reinterpret_tensor(buf12, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 400), (400, 1), 0), buf4, reinterpret_tensor(buf6, (64, 200), (200, 1), 0), reinterpret_tensor(buf8, (64, 100), (100, 1), 0), reinterpret_tensor(buf10, (64, 60), (60, 1), 0), primals_12, buf13, primals_10, buf14, primals_8, buf15, primals_6, buf16, primals_4, buf17, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((400, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((300, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((200, 300), (300, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((200, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((100, 200), (200, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((60, 100), (100, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((60, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((1, 60), (60, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn.init import kaiming_normal
from torch.nn.init import normal
def weights_init(m):
if isinstance(m, (nn.Conv1d, nn.Linear)):
kaiming_normal(m.weight.data)
try:
kaiming_normal(m.bias.data)
except ValueError:
normal(m.bias.data)
class Net4(nn.Module):
"""
Net4 is a neural network consisting of five hidden layers with sizes 400,
300, 200, 100 and 60
"""
hidden1 = 400
hidden2 = 300
hidden3 = 200
hidden4 = 100
hidden5 = 60
def __init__(self, input_size):
super(Net4, self).__init__()
self.fc1 = nn.Linear(input_size, self.hidden1)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(self.hidden1, self.hidden2)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(self.hidden2, self.hidden3)
self.relu3 = nn.ReLU()
self.fc4 = nn.Linear(self.hidden3, self.hidden4)
self.relu4 = nn.ReLU()
self.fc5 = nn.Linear(self.hidden4, self.hidden5)
self.relu5 = nn.ReLU()
self.fc6 = nn.Linear(self.hidden5, 1)
self.apply(weights_init)
def forward(self, x):
out = self.fc1(x)
out = self.relu1(out)
out = self.fc2(out)
out = self.relu2(out)
out = self.fc3(out)
out = self.relu3(out)
out = self.fc4(out)
out = self.relu4(out)
out = self.fc5(out)
out = self.relu5(out)
out = self.fc6(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
from torch.nn.init import kaiming_normal
from torch.nn.init import normal
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 300
x2 = xindex // 1200
x3 = xindex % 1200
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 19200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 300
x1 = xindex // 300
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 100
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 3840
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 60
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (400, 4), (4, 1))
assert_size_stride(primals_2, (400,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (300, 400), (400, 1))
assert_size_stride(primals_5, (300,), (1,))
assert_size_stride(primals_6, (200, 300), (300, 1))
assert_size_stride(primals_7, (200,), (1,))
assert_size_stride(primals_8, (100, 200), (200, 1))
assert_size_stride(primals_9, (100,), (1,))
assert_size_stride(primals_10, (60, 100), (100, 1))
assert_size_stride(primals_11, (60,), (1,))
assert_size_stride(primals_12, (1, 60), (60, 1))
assert_size_stride(primals_13, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf0
buf17 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(25600)](buf1,
primals_2, buf17, 25600, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1),
torch.float32)
buf16 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(19200)](buf2,
primals_5, buf3, buf16, 19200, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_5
buf4 = buf2
del buf2
triton_poi_fused_relu_view_2[grid(19200)](buf3, buf4, 19200, XBLOCK
=128, num_warps=4, num_stages=1)
del buf3
buf5 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (300, 200), (
1, 300), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 200), (3200, 800, 200, 1), 0)
del buf5
buf15 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_3[grid(12800)](buf6,
primals_7, buf15, 12800, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf7 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf6, (64, 200), (200, 1), 0),
reinterpret_tensor(primals_8, (200, 100), (1, 200), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 100), (1600, 400, 100, 1), 0)
del buf7
buf14 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_4[grid(6400)](buf8,
primals_9, buf14, 6400, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf9 = empty_strided_cuda((64, 60), (60, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (64, 100), (100, 1), 0),
reinterpret_tensor(primals_10, (100, 60), (1, 100), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4, 60), (960, 240, 60, 1), 0)
del buf9
buf13 = empty_strided_cuda((4, 4, 4, 60), (960, 240, 60, 1), torch.bool
)
triton_poi_fused_relu_threshold_backward_5[grid(3840)](buf10,
primals_11, buf13, 3840, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf12 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf10, (64, 60),
(60, 1), 0), reinterpret_tensor(primals_12, (60, 1), (1, 60), 0
), alpha=1, beta=1, out=buf12)
del primals_13
return (reinterpret_tensor(buf12, (4, 4, 4, 1), (16, 4, 1, 1), 0),
reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (64, 400), (400, 1), 0), buf4,
reinterpret_tensor(buf6, (64, 200), (200, 1), 0),
reinterpret_tensor(buf8, (64, 100), (100, 1), 0),
reinterpret_tensor(buf10, (64, 60), (60, 1), 0), primals_12, buf13,
primals_10, buf14, primals_8, buf15, primals_6, buf16, primals_4, buf17
)
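# Note: besides the network output (buf12), `call` returns the flattened layer
# activations plus the boolean `out <= 0` masks (buf13-buf17) produced by the fused
# relu/threshold_backward kernels, interleaved with the layer weights; autograd
# reuses those masks to zero gradients in the ReLU backward pass.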
def weights_init(m):
if isinstance(m, (nn.Conv1d, nn.Linear)):
kaiming_normal(m.weight.data)
        try:
            kaiming_normal(m.bias.data)
        except ValueError:
            # kaiming_normal cannot compute fan-in/fan-out for 1-D tensors, so
            # bias vectors fall back to a plain normal initialization.
            normal(m.bias.data)
class Net4New(nn.Module):
"""
Net4 is a neural network consisting of five hidden layers with sizes 400,
300, 200, 100 and 60
"""
hidden1 = 400
hidden2 = 300
hidden3 = 200
hidden4 = 100
hidden5 = 60
def __init__(self, input_size):
super(Net4New, self).__init__()
self.fc1 = nn.Linear(input_size, self.hidden1)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(self.hidden1, self.hidden2)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(self.hidden2, self.hidden3)
self.relu3 = nn.ReLU()
self.fc4 = nn.Linear(self.hidden3, self.hidden4)
self.relu4 = nn.ReLU()
self.fc5 = nn.Linear(self.hidden4, self.hidden5)
self.relu5 = nn.ReLU()
self.fc6 = nn.Linear(self.hidden5, 1)
self.apply(weights_init)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_10 = self.fc5.weight
primals_11 = self.fc5.bias
primals_12 = self.fc6.weight
primals_13 = self.fc6.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
| moritzschaefer/pavooc | Net4 | false | 7,283 | [
"MIT"
] | 1 | 735f5455f9a95a5734436a24e2aa92cf600c91af | https://github.com/moritzschaefer/pavooc/tree/735f5455f9a95a5734436a24e2aa92cf600c91af | import torch
from torch import nn
from torch.nn.init import kaiming_normal
from torch.nn.init import normal
def weights_init(m):
if isinstance(m, (nn.Conv1d, nn.Linear)):
kaiming_normal(m.weight.data)
        try:
            kaiming_normal(m.bias.data)
        except ValueError:
            # kaiming_normal cannot compute fan-in/fan-out for 1-D tensors, so
            # bias vectors fall back to a plain normal initialization.
            normal(m.bias.data)
class Model(nn.Module):
"""
Net4 is a neural network consisting of five hidden layers with sizes 400,
300, 200, 100 and 60
"""
hidden1 = 400
hidden2 = 300
hidden3 = 200
hidden4 = 100
hidden5 = 60
def __init__(self, input_size):
super().__init__()
self.fc1 = nn.Linear(input_size, self.hidden1)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(self.hidden1, self.hidden2)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(self.hidden2, self.hidden3)
self.relu3 = nn.ReLU()
self.fc4 = nn.Linear(self.hidden3, self.hidden4)
self.relu4 = nn.ReLU()
self.fc5 = nn.Linear(self.hidden4, self.hidden5)
self.relu5 = nn.ReLU()
self.fc6 = nn.Linear(self.hidden5, 1)
self.apply(weights_init)
def forward(self, x):
out = self.fc1(x)
out = self.relu1(out)
out = self.fc2(out)
out = self.relu2(out)
out = self.fc3(out)
out = self.relu3(out)
out = self.fc4(out)
out = self.relu4(out)
out = self.fc5(out)
out = self.relu5(out)
out = self.fc6(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
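# Hypothetical smoke test (an assumption, not part of the original repo) using the
# helpers above; the 1-unit output layer collapses the last dimension to 1.
def _smoke_test_net4():
    model = Model(*get_init_inputs())
    y = model(*get_inputs())
    assert y.shape == (4, 4, 4, 1)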
|
NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependency | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/zi/czioyfiql36jvbru3amu3iggyuvnn5c4pypwuaiss36muc2jqtqb.py
# Topologically Sorted Source Nodes: [model_input], Original ATen: [aten.add]
# Source node to ATen node mapping:
# model_input => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %primals_2), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/mp/cmpdsbnpgfsr7uwb7env74mojrq3nlzieqot6rnnkfpbzkkensbi.py
# Topologically Sorted Source Nodes: [out1_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out1_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [model_input], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out1_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_4, buf6, 256, grid=grid(256), stream=stream0)
del primals_4
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out2_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf4, primals_6, buf5, 256, grid=grid(256), stream=stream0)
del primals_6
return (buf3, buf4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf5, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn
import torch.onnx
class NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependency(torch
.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependency
, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.fc2 = torch.nn.Linear(input_size, hidden_size)
self.relu1 = torch.nn.ReLU()
self.relu2 = torch.nn.ReLU()
def forward(self, input1, input2):
model_input = input1 + input2
out1 = self.fc1(model_input)
out2 = self.fc2(model_input)
out1 = self.relu1(out1)
out2 = self.relu2(out2)
return out1, out2
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'num_classes': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf3,
primals_4, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf4,
primals_6, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_6
return buf3, buf4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf5, buf6
class NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependencyNew(
torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(
NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependencyNew
, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.fc2 = torch.nn.Linear(input_size, hidden_size)
self.relu1 = torch.nn.ReLU()
self.relu2 = torch.nn.ReLU()
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
| mrshu/onnxruntime | NeuralNetMultiplePositionalArgumentsMultiOutputsWithoutDependency | false | 7,284 | [
"MIT"
] | 1 | 335edaa2c485ba0dec877bf4cdbd652e2d5d105c | https://github.com/mrshu/onnxruntime/tree/335edaa2c485ba0dec877bf4cdbd652e2d5d105c | import torch
import torch.nn
import torch.onnx
class Model(torch
.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super().__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.fc2 = torch.nn.Linear(input_size, hidden_size)
self.relu1 = torch.nn.ReLU()
self.relu2 = torch.nn.ReLU()
def forward(self, input1, input2):
model_input = input1 + input2
out1 = self.fc1(model_input)
out2 = self.fc2(model_input)
out1 = self.relu1(out1)
out2 = self.relu2(out2)
return out1, out2
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
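# Hypothetical smoke test (an assumption, not in the original source); both branches
# consume the same summed input but do not depend on each other.
def _smoke_test_independent_outputs():
    model = Model(*get_init_inputs())
    out1, out2 = model(*get_inputs())
    assert out1.shape == out2.shape == (4, 4, 4, 4)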
|
NIN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3u/c3ub52l73zdv4klgqzgxmtzrzxvztuyczv2jksnvrjr7erq7guxd.py
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# einsum => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/t6/ct6f57cdvyh3ahq6iwyawuy7577bar2ftumjxqllolmn4c4lh7ph.py
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.add]
# Source node to ATen node mapping:
# y => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_3), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((1, 64, 4), (256, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (1, 64, 4), (0, 4, 1), 0), reinterpret_tensor(primals_2, (1, 4, 4), (16, 4, 1), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 1, 16, 4), 0), reinterpret_tensor(buf0, (1, 4, 64), (256, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import string
import torch
import numpy as np
import torch.utils.data
import torch
import torch.nn as nn
def _einsum(a, b, c, x, y):
einsum_str = '{},{}->{}'.format(''.join(a), ''.join(b), ''.join(c))
return torch.einsum(einsum_str, x, y)
def contract_inner(x, y):
"""tensordot(x, y, 1)."""
x_chars = list(string.ascii_lowercase[:len(x.shape)])
y_chars = list(string.ascii_lowercase[len(x.shape):len(y.shape) + len(x
.shape)])
y_chars[0] = x_chars[-1]
out_chars = x_chars[:-1] + y_chars[1:]
return _einsum(x_chars, y_chars, out_chars, x, y)
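# Note: for a rank-4 x and a rank-2 y the helpers above build the einsum string
# 'abcd,df->abcf', i.e. torch.tensordot(x, y, dims=1), contracting the last axis of
# x with the first axis of y.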
def variance_scaling(scale, mode, distribution, in_axis=1, out_axis=0,
dtype=torch.float32, device='cpu'):
def _compute_fans(shape, in_axis=1, out_axis=0):
receptive_field_size = np.prod(shape) / shape[in_axis] / shape[out_axis
]
fan_in = shape[in_axis] * receptive_field_size
fan_out = shape[out_axis] * receptive_field_size
return fan_in, fan_out
def init(shape, dtype=dtype, device=device):
fan_in, fan_out = _compute_fans(shape, in_axis, out_axis)
if mode == 'fan_in':
denominator = fan_in
elif mode == 'fan_out':
denominator = fan_out
elif mode == 'fan_avg':
denominator = (fan_in + fan_out) / 2
else:
raise ValueError(
'invalid mode for variance scaling initializer: {}'.format(
mode))
variance = scale / denominator
if distribution == 'normal':
return torch.randn(*shape, dtype=dtype, device=device) * np.sqrt(
variance)
elif distribution == 'uniform':
return (torch.rand(*shape, dtype=dtype, device=device) * 2.0 - 1.0
) * np.sqrt(3 * variance)
else:
raise ValueError(
'invalid distribution for variance scaling initializer')
return init
def default_init(scale=1.0):
"""The same initialization used in DDPM."""
scale = 1e-10 if scale == 0 else scale
return variance_scaling(scale, 'fan_avg', 'uniform')
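# Usage sketch (illustrative, not in the original source): default_init returns an
# initializer closure that is called with a shape, e.g.
#     W0 = default_init(scale=0.1)((4, 4))  # fan_avg/uniform tensor of shape (4, 4)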
class NIN(nn.Module):
def __init__(self, in_dim, num_units, init_scale=0.1):
super().__init__()
self.W = nn.Parameter(default_init(scale=init_scale)((in_dim,
num_units)), requires_grad=True)
self.b = nn.Parameter(torch.zeros(num_units), requires_grad=True)
def forward(self, x):
x = x.permute(0, 2, 3, 1)
y = contract_inner(x, self.W) + self.b
return y.permute(0, 3, 1, 2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'num_units': 4}]
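# Hypothetical equivalence check (an assumption, not in the original source): NIN
# acts as a 1x1 convolution, so conv2d with the transposed weight reproduces it.
def _check_nin_is_1x1_conv():
    x = torch.rand(4, 4, 4, 4)
    nin = NIN(4, 4)
    # conv weight has shape (num_units, in_dim, 1, 1), i.e. W transposed.
    ref = nn.functional.conv2d(x, nin.W.t().reshape(4, 4, 1, 1), nin.b)
    assert torch.allclose(nin(x), ref, atol=1e-05)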
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import string
import numpy as np
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch
.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf0, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((1, 64, 4), (256, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (1, 64, 4), (0, 4, 1),
0), reinterpret_tensor(primals_2, (1, 4, 4), (16, 4, 1), 0),
out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_add_1[grid(256)](buf2, primals_3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_3
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 1, 16, 4), 0
), reinterpret_tensor(buf0, (1, 4, 64), (256, 1, 4), 0)
def _einsum(a, b, c, x, y):
einsum_str = '{},{}->{}'.format(''.join(a), ''.join(b), ''.join(c))
return torch.einsum(einsum_str, x, y)
def contract_inner(x, y):
"""tensordot(x, y, 1)."""
x_chars = list(string.ascii_lowercase[:len(x.shape)])
y_chars = list(string.ascii_lowercase[len(x.shape):len(y.shape) + len(x
.shape)])
y_chars[0] = x_chars[-1]
out_chars = x_chars[:-1] + y_chars[1:]
return _einsum(x_chars, y_chars, out_chars, x, y)
def variance_scaling(scale, mode, distribution, in_axis=1, out_axis=0,
dtype=torch.float32, device='cpu'):
def _compute_fans(shape, in_axis=1, out_axis=0):
receptive_field_size = np.prod(shape) / shape[in_axis] / shape[out_axis
]
fan_in = shape[in_axis] * receptive_field_size
fan_out = shape[out_axis] * receptive_field_size
return fan_in, fan_out
def init(shape, dtype=dtype, device=device):
fan_in, fan_out = _compute_fans(shape, in_axis, out_axis)
if mode == 'fan_in':
denominator = fan_in
elif mode == 'fan_out':
denominator = fan_out
elif mode == 'fan_avg':
denominator = (fan_in + fan_out) / 2
else:
raise ValueError(
'invalid mode for variance scaling initializer: {}'.format(
mode))
variance = scale / denominator
if distribution == 'normal':
return torch.randn(*shape, dtype=dtype, device=device) * np.sqrt(
variance)
elif distribution == 'uniform':
return (torch.rand(*shape, dtype=dtype, device=device) * 2.0 - 1.0
) * np.sqrt(3 * variance)
else:
raise ValueError(
'invalid distribution for variance scaling initializer')
return init
def default_init(scale=1.0):
"""The same initialization used in DDPM."""
scale = 1e-10 if scale == 0 else scale
return variance_scaling(scale, 'fan_avg', 'uniform')
class NINNew(nn.Module):
def __init__(self, in_dim, num_units, init_scale=0.1):
super().__init__()
self.W = nn.Parameter(default_init(scale=init_scale)((in_dim,
num_units)), requires_grad=True)
self.b = nn.Parameter(torch.zeros(num_units), requires_grad=True)
def forward(self, input_0):
primals_2 = self.W
primals_3 = self.b
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| mrjavoman/Image-Super-Resolution-via-Iterative-Refinement | NIN | false | 7,285 | [
"Apache-2.0"
] | 1 | 2eb11d972e8e024c3b1d7a84f90895e329b5b408 | https://github.com/mrjavoman/Image-Super-Resolution-via-Iterative-Refinement/tree/2eb11d972e8e024c3b1d7a84f90895e329b5b408 | import string
import torch
import numpy as np
import torch.utils.data
import torch
import torch.nn as nn
def _einsum(a, b, c, x, y):
einsum_str = '{},{}->{}'.format(''.join(a), ''.join(b), ''.join(c))
return torch.einsum(einsum_str, x, y)
def contract_inner(x, y):
"""tensordot(x, y, 1)."""
x_chars = list(string.ascii_lowercase[:len(x.shape)])
y_chars = list(string.ascii_lowercase[len(x.shape):len(y.shape) + len(x
.shape)])
y_chars[0] = x_chars[-1]
out_chars = x_chars[:-1] + y_chars[1:]
return _einsum(x_chars, y_chars, out_chars, x, y)
def variance_scaling(scale, mode, distribution, in_axis=1, out_axis=0,
dtype=torch.float32, device='cpu'):
def _compute_fans(shape, in_axis=1, out_axis=0):
receptive_field_size = np.prod(shape) / shape[in_axis] / shape[out_axis
]
fan_in = shape[in_axis] * receptive_field_size
fan_out = shape[out_axis] * receptive_field_size
return fan_in, fan_out
def init(shape, dtype=dtype, device=device):
fan_in, fan_out = _compute_fans(shape, in_axis, out_axis)
if mode == 'fan_in':
denominator = fan_in
elif mode == 'fan_out':
denominator = fan_out
elif mode == 'fan_avg':
denominator = (fan_in + fan_out) / 2
else:
raise ValueError(
'invalid mode for variance scaling initializer: {}'.format(
mode))
variance = scale / denominator
if distribution == 'normal':
return torch.randn(*shape, dtype=dtype, device=device) * np.sqrt(
variance)
elif distribution == 'uniform':
return (torch.rand(*shape, dtype=dtype, device=device) * 2.0 - 1.0
) * np.sqrt(3 * variance)
else:
raise ValueError(
'invalid distribution for variance scaling initializer')
return init
def default_init(scale=1.0):
"""The same initialization used in DDPM."""
scale = 1e-10 if scale == 0 else scale
return variance_scaling(scale, 'fan_avg', 'uniform')
class Model(nn.Module):
def __init__(self, in_dim, num_units, init_scale=0.1):
super().__init__()
self.W = nn.Parameter(default_init(scale=init_scale)((in_dim,
num_units)), requires_grad=True)
self.b = nn.Parameter(torch.zeros(num_units), requires_grad=True)
def forward(self, x):
x = x.permute(0, 2, 3, 1)
y = contract_inner(x, self.W) + self.b
return y.permute(0, 3, 1, 2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependency | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/zi/czioyfiql36jvbru3amu3iggyuvnn5c4pypwuaiss36muc2jqtqb.py
# Topologically Sorted Source Nodes: [model_input], Original ATen: [aten.add]
# Source node to ATen node mapping:
# model_input => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %primals_2), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/sb/csbqfhl3tbhobxxibww6rnv4q33jyajqsvetse4kiun22xct43oo.py
# Topologically Sorted Source Nodes: [out1_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out1_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [model_input], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [out1_1], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf2, primals_4, 256, grid=grid(256), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_6
return (buf2, reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf2, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn
import torch.onnx
class NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependency(torch.
nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependency,
self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input1, input2):
model_input = input1 + input2
out1 = self.fc1(model_input)
out1 = self.relu(out1)
out2 = self.fc2(out1)
return out1, out2
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'num_classes': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_relu_1[grid(256)](buf2, primals_4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_6
return buf2, reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf2, primals_5
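# Note: extern_kernels.addmm above computes bias + input @ weight.T in a single
# call (alpha=1, beta=1), folding fc2's bias addition into the matmul.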
class NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependencyNew(torch
.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependencyNew
, self).__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
| mrshu/onnxruntime | NeuralNetMultiplePositionalArgumentsMultiOutputsWithDependency | false | 7,286 | [
"MIT"
] | 1 | 335edaa2c485ba0dec877bf4cdbd652e2d5d105c | https://github.com/mrshu/onnxruntime/tree/335edaa2c485ba0dec877bf4cdbd652e2d5d105c | import torch
import torch.nn
import torch.onnx
class Model(torch.
nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super().__init__()
self.fc1 = torch.nn.Linear(input_size, hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input1, input2):
model_input = input1 + input2
out1 = self.fc1(model_input)
out1 = self.relu(out1)
out2 = self.fc2(out1)
return out1, out2
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
BoundReciprocal | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/yb/cybxmqgstg473ic3ozmef5imn5esyxvm3ttfpkjco3dcshvnl2bq.py
# Topologically Sorted Source Nodes: [reciprocal], Original ATen: [aten.reciprocal]
# Source node to ATen node mapping:
# reciprocal => reciprocal
# Graph fragment:
# %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_reciprocal_0 = async_compile.triton('triton_poi_fused_reciprocal_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reciprocal_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp1 / tmp0
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [reciprocal], Original ATen: [aten.reciprocal]
stream0 = get_raw_stream(0)
triton_poi_fused_reciprocal_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
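# Note: the kernel divides an int32 constant 1 by the fp32 input, which Triton
# promotes to fp32, i.e. an elementwise 1/x matching aten.reciprocal.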
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import MSELoss
def isnan(x):
if isinstance(x, Patches):
return False
return torch.isnan(x).any()
class Perturbation:
def __init__(self):
pass
def set_eps(self, eps):
self.eps = eps
def concretize(self, x, A, sign=-1, aux=None):
raise NotImplementedError
def init(self, x, aux=None, forward=False):
raise NotImplementedError
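# Note: subclasses implement `concretize(x, A, sign)` to turn the linear relaxation
# A into a concrete bound over the perturbation set (sign=-1 appears to select the
# lower bound, sign=+1 the upper), and `init` to build the initial LinearBound used
# by forward/backward bound propagation.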
class PerturbationL0Norm(Perturbation):
def __init__(self, eps, x_L=None, x_U=None, ratio=1.0):
self.eps = eps
self.x_U = x_U
self.x_L = x_L
self.ratio = ratio
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
eps = math.ceil(self.eps)
x = x.reshape(x.shape[0], -1, 1)
center = A.matmul(x)
x = x.reshape(x.shape[0], 1, -1)
original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2])
neg_mask = A < 0
pos_mask = A >= 0
if sign == 1:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = A[pos_mask] - original[pos_mask]
A_diff[neg_mask] = -original[neg_mask]
else:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = original[pos_mask]
A_diff[neg_mask] = original[neg_mask] - A[neg_mask]
A_diff, _ = torch.sort(A_diff, dim=2, descending=True)
bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2
) * self.ratio
return bound.squeeze(2)
def init(self, x, aux=None, forward=False):
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps)
class PerturbationLpNorm(Perturbation):
def __init__(self, eps, norm=np.inf, x_L=None, x_U=None):
self.eps = eps
self.norm = norm
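        # The dual (Holder conjugate) norm q with 1/p + 1/q = 1; used in
        # concretize() to bound |A x| over an Lp ball via Holder's inequality.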
self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 -
1.0 / self.norm)
self.x_L = x_L
self.x_U = x_U
"""Given an variable x and its bound matrix A, compute worst case bound according to Lp norm."""
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
def concretize_matrix(A):
nonlocal x
if not isinstance(A, eyeC):
A = A.reshape(A.shape[0], A.shape[1], -1)
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
x_ub = x_U.reshape(x_U.shape[0], -1, 1)
x_lb = x_L.reshape(x_L.shape[0], -1, 1)
center = (x_ub + x_lb) / 2.0
diff = (x_ub - x_lb) / 2.0
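                # Center/radius form of the interval: the worst case over an
                # L_inf box is A @ center +/- |A| @ radius, taken exactly below.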
if not isinstance(A, eyeC):
bound = A.matmul(center) + sign * A.abs().matmul(diff)
else:
bound = center + sign * diff
else:
x = x.reshape(x.shape[0], -1, 1)
if not isinstance(A, eyeC):
deviation = A.norm(self.dual_norm, -1) * self.eps
bound = A.matmul(x) + sign * deviation.unsqueeze(-1)
else:
bound = x + sign * self.eps
bound = bound.squeeze(-1)
return bound
def concretize_patches(A):
nonlocal x
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
center = (x_U + x_L) / 2.0
diff = (x_U - x_L) / 2.0
if not A.identity == 1:
unfold_input = F.unfold(center, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound = prod.view(prod.size(0), prod.size(1), int(math.
sqrt(prod.size(2))), int(math.sqrt(prod.size(2))))
unfold_input = F.unfold(diff, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches.abs()
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound += sign * prod.view(prod.size(0), prod.size(1),
int(math.sqrt(prod.size(2))), int(math.sqrt(prod.
size(2))))
else:
bound = center + sign * diff
return bound
else:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
raise NotImplementedError()
if isinstance(A, eyeC) or isinstance(A, torch.Tensor):
return concretize_matrix(A)
elif isinstance(A, Patches):
return concretize_patches(A)
elif isinstance(A, BoundList):
for b in A.bound_list:
if isinstance(b, eyeC) or isinstance(b, torch.Tensor):
pass
else:
raise NotImplementedError()
def init(self, x, aux=None, forward=False):
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
else:
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
if self.norm == np.inf:
if self.x_L is None and self.x_U is None:
return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps)
else:
return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})'
.format(self.eps, self.x_L, self.x_U))
else:
return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm,
self.eps)
class PerturbationSynonym(Perturbation):
def __init__(self, budget, eps=1.0, use_simple=False):
super(PerturbationSynonym, self).__init__()
self._load_synonyms()
self.budget = budget
self.eps = eps
self.use_simple = use_simple
self.model = None
self.train = False
def __repr__(self):
return (
'perturbation(Synonym-based word substitution budget={}, eps={})'
.format(self.budget, self.eps))
def _load_synonyms(self, path='data/synonyms.json'):
with open(path) as file:
self.synonym = json.loads(file.read())
logger.info('Synonym list loaded for {} words'.format(len(self.
synonym)))
def set_train(self, train):
self.train = train
def concretize(self, x, A, sign, aux):
assert self.model is not None
x_rep, mask, can_be_replaced = aux
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
dim_out = A.shape[1]
max_num_cand = x_rep.shape[2]
mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32,
device=A.device)
num_pos = int(np.max(np.sum(can_be_replaced, axis=-1)))
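        # num_pos is the largest number of replaceable words in any example in
        # the batch; every example below is padded with zeros up to this length.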
update_A = A.shape[-1] > num_pos * dim_word
if update_A:
bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape(
batch_size, -1, 1)).squeeze(-1)
else:
bias = 0.0
A = A.reshape(batch_size, dim_out, -1, dim_word)
A_new, x_new, x_rep_new, mask_new = [], [], [], []
zeros_A = torch.zeros(dim_out, dim_word, device=A.device)
zeros_w = torch.zeros(dim_word, device=A.device)
zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device)
zeros_mask = torch.zeros(max_num_cand, device=A.device)
for t in range(batch_size):
cnt = 0
for i in range(0, length):
if can_be_replaced[t][i]:
if update_A:
A_new.append(A[t, :, i, :])
x_new.append(x[t][i])
x_rep_new.append(x_rep[t][i])
mask_new.append(mask[t][i])
cnt += 1
if update_A:
A_new += [zeros_A] * (num_pos - cnt)
x_new += [zeros_w] * (num_pos - cnt)
x_rep_new += [zeros_rep] * (num_pos - cnt)
mask_new += [zeros_mask] * (num_pos - cnt)
if update_A:
A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word
).transpose(1, 2)
x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word)
x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos,
max_num_cand, dim_word)
mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand)
length = num_pos
A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2)
x = x.reshape(batch_size, length, -1, 1)
if sign == 1:
cmp, init = torch.max, -1e+30
else:
cmp, init = torch.min, 1e+30
init_tensor = torch.ones(batch_size, dim_out) * init
dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1)
]
dp[0][0] = torch.zeros(batch_size, dim_out)
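        # dp[i][j] holds the best (min or max, per `cmp`) partial bound after
        # the first i words with exactly j synonym substitutions spent.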
A = A.reshape(batch_size * length, A.shape[2], A.shape[3])
Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x.
shape[3])).reshape(batch_size, length, A.shape[1])
Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length,
max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size,
length, A.shape[1], max_num_cand)
Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2)
Ax_rep_bound = cmp(Ax_rep, dim=-1).values
if self.use_simple and self.train:
return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias
for i in range(1, length + 1):
dp[i][0] = dp[i - 1][0] + Ax[:, i - 1]
for j in range(1, self.budget + 1):
dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1
] + Ax_rep_bound[:, i - 1])
dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1,
batch_size, dim_out)
return cmp(dp, dim=0).values + bias
def init(self, x, aux=None, forward=False):
tokens, batch = aux
self.tokens = tokens
assert len(x.shape) == 3
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
max_pos = 1
        can_be_replaced = np.zeros((batch_size, length), dtype=bool)
self._build_substitution(batch)
for t in range(batch_size):
cnt = 0
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
for i in range(len(tokens[t])):
if tokens[t][i] == '[UNK]' or len(candidates[i]
) == 0 or tokens[t][i] != candidates[i][0]:
continue
for w in candidates[i][1:]:
if w in self.model.vocab:
can_be_replaced[t][i] = True
cnt += 1
break
max_pos = max(max_pos, cnt)
dim = max_pos * dim_word
if forward:
eye = torch.eye(dim_word)
lw = torch.zeros(batch_size, dim, length, dim_word)
lb = torch.zeros_like(x)
word_embeddings = self.model.word_embeddings.weight
vocab = self.model.vocab
x_rep = [[[] for i in range(length)] for t in range(batch_size)]
max_num_cand = 1
for t in range(batch_size):
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
cnt = 0
for i in range(length):
if can_be_replaced[t][i]:
word_embed = word_embeddings[vocab[tokens[t][i]]]
other_embed = x[t, i] - word_embed
if forward:
lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye
lb[t, i, :] = torch.zeros_like(word_embed)
for w in candidates[i][1:]:
if w in self.model.vocab:
x_rep[t][i].append(word_embeddings[self.model.
vocab[w]] + other_embed)
max_num_cand = max(max_num_cand, len(x_rep[t][i]))
cnt += 1
elif forward:
lb[t, i, :] = x[t, i, :]
if forward:
uw, ub = lw, lb
else:
lw = lb = uw = ub = None
zeros = torch.zeros(dim_word, device=x.device)
x_rep_, mask = [], []
for t in range(batch_size):
for i in range(length):
x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep
[t][i]))
mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len(
x_rep[t][i]))
x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand,
dim_word)
mask = torch.tensor(mask, dtype=torch.float32, device=x.device
).reshape(batch_size, length, max_num_cand)
x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps)
inf = 1e+20
lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * inf, dim=2).values
upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * -inf, dim=2).values
lower = torch.min(lower, x)
upper = torch.max(upper, x)
return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask,
can_be_replaced)
def _build_substitution(self, batch):
for t, example in enumerate(batch):
if 'candidates' not in example or example['candidates'] is None:
candidates = []
tokens = example['sentence'].strip().lower().split(' ')
for i in range(len(tokens)):
_cand = []
if tokens[i] in self.synonym:
for w in self.synonym[tokens[i]]:
if w in self.model.vocab:
_cand.append(w)
if len(_cand) > 0:
_cand = [tokens[i]] + _cand
candidates.append(_cand)
example['candidates'] = candidates
class Interval(tuple):
    def __new__(cls, lb=None, ub=None, ptb=None):
        if ub is None:
            assert isinstance(lb, tuple)
            lb, ub = lb
        return tuple.__new__(cls, (lb, ub))
def __init__(self, lb, ub, ptb=None):
if ptb is None:
self.ptb = None
assert lb is ub
elif not isinstance(ptb, Perturbation):
raise ValueError(
'ptb must be a Perturbation object or None. Got type {}'.
format(type(ptb)))
else:
self.ptb = ptb
def __str__(self):
return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb)
def __repr__(self):
return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1],
self.ptb)
"""Checking if the other interval is tuple, keep the perturbation."""
@staticmethod
def make_interval(lb, ub, other):
if isinstance(other, Interval):
return Interval(lb, ub, other.ptb)
else:
return lb, ub
"""Given a tuple or Interval object, returns the norm and eps."""
@staticmethod
def get_perturbation(interval):
if isinstance(interval, Interval):
if isinstance(interval.ptb, PerturbationLpNorm):
return interval.ptb.norm, interval.ptb.eps
elif isinstance(interval.ptb, PerturbationSynonym):
return np.inf, 1.0
elif isinstance(interval.ptb, PerturbationL0Norm):
return 0, interval.ptb.eps, interval.ptb.ratio
elif interval.ptb is None:
raise RuntimeError(
'get_perturbation() encountered an interval that is not perturbed.'
)
else:
raise RuntimeError(
'get_perturbation() does not know how to handle {}'.
format(type(interval.ptb)))
else:
return np.inf, np.nan
"""Checking if a Interval or tuple object has perturbation enabled."""
@staticmethod
def is_perturbed(interval):
if isinstance(interval, Interval) and interval.ptb is None:
return False
else:
return True
class Bound(nn.Module):
def __init__(self, input_name, name, ori_name, attr={}, inputs=[],
output_index=0, options={}, device=None):
super().__init__()
self.output_name = []
(self.input_name, self.name, self.ori_name, self.attr, self.inputs,
self.output_index, self.options, self.device) = (input_name,
name, ori_name, attr, inputs, output_index, options, device)
self.fv = None
self.from_input = False
self.bounded = False
self.IBP_rets = None
self.perturbed = False
if options is not None and 'loss_fusion' in options:
self.loss_fusion = options['loss_fusion']
else:
self.loss_fusion = False
"""Check if the i-th input is with perturbation or not."""
def is_input_perturbed(self, i=0):
return self.inputs[i].perturbed
def forward(self, *x):
raise NotImplementedError
def interval_propagate(self, *v):
assert len(v) == 1
h_L, h_U = v[0]
return Interval.make_interval(self.forward(h_L), self.forward(h_U),
v[0])
def bound_forward(self, dim_in, last):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA):
raise NotImplementedError
    def infer_batch_dim(self, batch_size, *x):
        raise NotImplementedError
def broadcast_backward(self, A, x):
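        # Sum out the dimensions of A that were introduced by broadcasting so
        # that its trailing shape matches the (unbroadcast) shape of input x.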
shape = x.default_shape
batch_dim = max(self.batch_dim, 0)
if isinstance(A, torch.Tensor):
if x.batch_dim == -1:
shape = torch.Size([A.shape[batch_dim + 1]] + list(shape))
dims = []
cnt_sum = A.ndim - len(shape) - 1
for i in range(1, A.ndim):
if i != self.batch_dim + 1 and cnt_sum > 0:
dims.append(i)
cnt_sum -= 1
if dims:
A = torch.sum(A, dim=dims)
else:
dims = list(range(1, 1 + A.ndim - 1 - len(shape)))
if dims:
A = torch.sum(A, dim=dims)
dims = []
for i in range(len(shape)):
if shape[i] == 1 and A.shape[i + 1] != 1:
dims.append(i + 1)
if dims:
A = torch.sum(A, dim=dims, keepdim=True)
assert A.shape[1:] == shape
elif type(A) == Patches:
pass
return A
@staticmethod
def broadcast_forward(dim_in, x, shape_res):
lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub
shape_x, shape_res = list(x.lb.shape), list(shape_res)
if lw is None:
lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device)
has_batch_size = False
else:
has_batch_size = True
while len(shape_x) < len(shape_res):
if not has_batch_size:
lw, uw = lw.unsqueeze(0), uw.unsqueeze(0)
lb, ub = lb.unsqueeze(0), ub.unsqueeze(0)
shape_x = [1] + shape_x
has_batch_size = True
else:
lw, uw = lw.unsqueeze(2), uw.unsqueeze(2)
lb, ub = lb.unsqueeze(1), ub.unsqueeze(1)
shape_x = [shape_x[0], 1] + shape_x[1:]
repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))]
lb, ub = lb.repeat(*repeat), ub.repeat(*repeat)
repeat = repeat[:1] + [1] + repeat[1:]
lw, uw = lw.repeat(*repeat), uw.repeat(*repeat)
return lw, lb, uw, ub
def get_bias(self, A, bias):
if A is None:
return 0
assert not isnan(A)
assert not isnan(bias)
if isinstance(A, torch.Tensor):
if torch.norm(A, p=1) < epsilon:
return 0
output_dim = A.shape[0]
if self.batch_dim != -1:
batch_size = A.shape[self.batch_dim + 1]
A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1
]).astype(np.int32), batch_size, np.prod(A.shape[self.
batch_dim + 2:]).astype(np.int32)]
A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size,
output_dim, -1)
bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape(
batch_size, -1, 1)
bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1)
else:
batch_size = A.shape[1]
A = A.view(output_dim, batch_size, -1)
bias_new = A.matmul(bias.view(-1))
if isnan(bias_new):
return 0
else:
return bias_new
elif type(A) == Patches:
if torch.norm(A.patches, p=1) < epsilon:
return 0
if self.batch_dim != -1:
batch_size = bias.shape[0]
bias = F.unfold(bias, kernel_size=A.patches.size(-1),
stride=A.stride, padding=A.padding).transpose(-2, -1
).unsqueeze(-2)
bias.size(1)
patches = A.patches.view(A.patches.size(0), A.patches.size(
1), A.patches.size(-4), A.patches.size(-1) * A.patches.
size(-2) * A.patches.size(-3))
prod = bias * patches
bias_new = prod.sum(-1).transpose(-2, -1)
bias_new = bias_new.view(batch_size, bias_new.size(-2), int
(math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new.
size(-1))))
else:
patches = A.patches
patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias
patches_reshape = patches_reshape.transpose(-1, -2)
return patches_reshape.view(patches_reshape.size(0),
patches_reshape.size(1), int(math.sqrt(patches_reshape.
size(2))), -1).transpose(0, 1)
return bias_new
        else:
            raise NotImplementedError()
class BoundActivation(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
self.nonlinear = True
self.relaxed = False
def _init_linear(self, x):
self.mask_pos = torch.gt(x.lower, 0)
self.mask_neg = torch.lt(x.upper, 0)
        # `1 - bool_tensor` raises on modern PyTorch; build the mask with logical ops instead.
        self.mask_both = ~(self.mask_pos | self.mask_neg)
self.lw = torch.zeros(x.lower.shape, device=self.device)
self.lb = self.lw.clone()
self.uw = self.lw.clone()
self.ub = self.lw.clone()
def _add_linear(self, mask, type, k, x0, y0):
if mask is None:
mask = 1
if type == 'lower':
w_out, b_out = self.lw, self.lb
else:
w_out, b_out = self.uw, self.ub
w_out += mask * k
b_out += mask * (-x0 * k + y0)
def bound_relax(self, x):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA, x):
if not self.relaxed:
self._init_linear(x)
self.bound_relax(x)
def _bound_oneside(last_A, sign=-1):
if last_A is None:
return None, 0
if self.batch_dim == 0:
if sign == -1:
_A = last_A.clamp(min=0) * self.lw.unsqueeze(0
) + last_A.clamp(max=0) * self.uw.unsqueeze(0)
_bias = last_A.clamp(min=0) * self.lb.unsqueeze(0
) + last_A.clamp(max=0) * self.ub.unsqueeze(0)
elif sign == 1:
_A = last_A.clamp(min=0) * self.uw.unsqueeze(0
) + last_A.clamp(max=0) * self.lw.unsqueeze(0)
_bias = last_A.clamp(min=0) * self.ub.unsqueeze(0
) + last_A.clamp(max=0) * self.lb.unsqueeze(0)
while _bias.ndim > 2:
_bias = torch.sum(_bias, dim=-1)
elif self.batch_dim == -1:
                # Cast the boolean mask so that (1 - mask) below is valid tensor arithmetic.
                mask = torch.gt(last_A, 0.0).to(last_A.dtype)
if sign == -1:
_A = last_A * (mask * self.lw.unsqueeze(0).unsqueeze(1) +
(1 - mask) * self.uw.unsqueeze(0).unsqueeze(1))
_bias = last_A * (mask * self.lb.unsqueeze(0).unsqueeze
(1) + (1 - mask) * self.ub.unsqueeze(0).unsqueeze(1))
elif sign == 1:
_A = last_A * (mask * self.uw.unsqueeze(0).unsqueeze(1) +
(1 - mask) * self.lw.unsqueeze(0).unsqueeze(1))
_bias = last_A * (mask * self.ub.unsqueeze(0).unsqueeze
(1) + (1 - mask) * self.lb.unsqueeze(0).unsqueeze(1))
while _bias.ndim > 2:
_bias = torch.sum(_bias, dim=-1)
else:
raise NotImplementedError
return _A, _bias
lA, lbias = _bound_oneside(last_lA, sign=-1)
uA, ubias = _bound_oneside(last_uA, sign=+1)
return [(lA, uA)], lbias, ubias
def bound_forward(self, dim_in, x):
if not self.relaxed:
self._init_linear(x)
self.bound_relax(x)
if self.lw.ndim > 0:
if x.lw is not None:
lw = self.lw.unsqueeze(1).clamp(min=0
) * x.lw + self.lw.unsqueeze(1).clamp(max=0) * x.uw
uw = self.uw.unsqueeze(1).clamp(max=0
) * x.lw + self.uw.unsqueeze(1).clamp(min=0) * x.uw
else:
lw = uw = None
elif x.lw is not None:
lw = self.lw.unsqueeze(0).clamp(min=0) * x.lw + self.lw.unsqueeze(0
).clamp(max=0) * x.uw
uw = self.uw.unsqueeze(0).clamp(min=0) * x.lw + self.uw.unsqueeze(0
).clamp(max=0) * x.uw
else:
lw = uw = None
lb = self.lw.clamp(min=0) * x.lb + self.lw.clamp(max=0
) * x.ub + self.lb
ub = self.uw.clamp(max=0) * x.lb + self.uw.clamp(min=0
) * x.ub + self.ub
return LinearBound(lw, lb, uw, ub)
def infer_batch_dim(self, batch_size, *x):
return x[0]
class BoundReciprocal(BoundActivation):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
self.nonlinear = True
def forward(self, x):
return torch.reciprocal(x)
def bound_relax(self, x):
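        # 1/x is convex for x > 0: the tangent at the interval midpoint is a
        # valid lower bound and the chord through the endpoints an upper bound.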
m = (x.lower + x.upper) / 2
kl = -1 / m.pow(2)
self._add_linear(mask=None, type='lower', k=kl, x0=m, y0=1.0 / m)
ku = -1.0 / (x.lower * x.upper)
self._add_linear(mask=None, type='upper', k=ku, x0=x.lower, y0=1.0 /
x.lower)
def interval_propagate(self, *v):
h_L, h_U = v[0]
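        # reciprocal is decreasing on x > 0, so the output bounds swap:
        # the lower bound comes from h_U and the upper bound from h_L.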
return torch.reciprocal(h_U.float()), torch.reciprocal(h_L.float())
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_name': 4, 'name': 4, 'ori_name': 4, 'attr': 4,
'inputs': 4, 'output_index': 4, 'options': _mock_config(loss_fusion
=MSELoss()), 'device': 0}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import json
import logging
import math
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
logger = logging.getLogger(__name__)
# Assumed small tolerance used by Bound.get_bias() below; 1e-12 is a sketch default.
epsilon = 1e-12
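# Elementwise reciprocal kernel: each program instance handles XBLOCK contiguous
# elements of the flattened 4x4x4x4 input, masked to the 256 valid positions.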
@triton.jit
def triton_poi_fused_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp1 / tmp0
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reciprocal_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def isnan(x):
if isinstance(x, Patches):
return False
return torch.isnan(x).any()
class Perturbation:
def __init__(self):
pass
def set_eps(self, eps):
self.eps = eps
def concretize(self, x, A, sign=-1, aux=None):
raise NotImplementedError
def init(self, x, aux=None, forward=False):
raise NotImplementedError
class PerturbationL0Norm(Perturbation):
def __init__(self, eps, x_L=None, x_U=None, ratio=1.0):
self.eps = eps
self.x_U = x_U
self.x_L = x_L
self.ratio = ratio
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
eps = math.ceil(self.eps)
x = x.reshape(x.shape[0], -1, 1)
center = A.matmul(x)
x = x.reshape(x.shape[0], 1, -1)
original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2])
neg_mask = A < 0
pos_mask = A >= 0
if sign == 1:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = A[pos_mask] - original[pos_mask]
A_diff[neg_mask] = -original[neg_mask]
else:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = original[pos_mask]
A_diff[neg_mask] = original[neg_mask] - A[neg_mask]
A_diff, _ = torch.sort(A_diff, dim=2, descending=True)
bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2
) * self.ratio
return bound.squeeze(2)
def init(self, x, aux=None, forward=False):
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps)
class PerturbationLpNorm(Perturbation):
def __init__(self, eps, norm=np.inf, x_L=None, x_U=None):
self.eps = eps
self.norm = norm
self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 -
1.0 / self.norm)
self.x_L = x_L
self.x_U = x_U
"""Given an variable x and its bound matrix A, compute worst case bound according to Lp norm."""
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
def concretize_matrix(A):
nonlocal x
if not isinstance(A, eyeC):
A = A.reshape(A.shape[0], A.shape[1], -1)
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
x_ub = x_U.reshape(x_U.shape[0], -1, 1)
x_lb = x_L.reshape(x_L.shape[0], -1, 1)
center = (x_ub + x_lb) / 2.0
diff = (x_ub - x_lb) / 2.0
if not isinstance(A, eyeC):
bound = A.matmul(center) + sign * A.abs().matmul(diff)
else:
bound = center + sign * diff
else:
x = x.reshape(x.shape[0], -1, 1)
if not isinstance(A, eyeC):
deviation = A.norm(self.dual_norm, -1) * self.eps
bound = A.matmul(x) + sign * deviation.unsqueeze(-1)
else:
bound = x + sign * self.eps
bound = bound.squeeze(-1)
return bound
def concretize_patches(A):
nonlocal x
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
center = (x_U + x_L) / 2.0
diff = (x_U - x_L) / 2.0
if not A.identity == 1:
unfold_input = F.unfold(center, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound = prod.view(prod.size(0), prod.size(1), int(math.
sqrt(prod.size(2))), int(math.sqrt(prod.size(2))))
unfold_input = F.unfold(diff, kernel_size=A.patches.
size(-1), padding=A.padding, stride=A.stride
).transpose(-2, -1)
unfold_input = unfold_input.view(unfold_input.size(0),
unfold_input.size(1), -1, A.patches.size(-3), A.
patches.size(-2), A.patches.size(-1))
prod = unfold_input * A.patches.abs()
prod = prod.sum((-1, -2, -3)).transpose(-2, -1)
bound += sign * prod.view(prod.size(0), prod.size(1),
int(math.sqrt(prod.size(2))), int(math.sqrt(prod.
size(2))))
else:
bound = center + sign * diff
return bound
else:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
raise NotImplementedError()
if isinstance(A, eyeC) or isinstance(A, torch.Tensor):
return concretize_matrix(A)
elif isinstance(A, Patches):
return concretize_patches(A)
elif isinstance(A, BoundList):
for b in A.bound_list:
if isinstance(b, eyeC) or isinstance(b, torch.Tensor):
pass
else:
raise NotImplementedError()
def init(self, x, aux=None, forward=False):
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
else:
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
if self.norm == np.inf:
if self.x_L is None and self.x_U is None:
return 'PerturbationLpNorm(norm=inf, eps={})'.format(self.eps)
else:
return ('PerturbationLpNorm(norm=inf, eps={}, x_L={}, x_U={})'
.format(self.eps, self.x_L, self.x_U))
else:
return 'PerturbationLpNorm(norm={}, eps={})'.format(self.norm,
self.eps)
class PerturbationSynonym(Perturbation):
def __init__(self, budget, eps=1.0, use_simple=False):
super(PerturbationSynonym, self).__init__()
self._load_synonyms()
self.budget = budget
self.eps = eps
self.use_simple = use_simple
self.model = None
self.train = False
def __repr__(self):
return (
'perturbation(Synonym-based word substitution budget={}, eps={})'
.format(self.budget, self.eps))
def _load_synonyms(self, path='data/synonyms.json'):
with open(path) as file:
self.synonym = json.loads(file.read())
logger.info('Synonym list loaded for {} words'.format(len(self.
synonym)))
def set_train(self, train):
self.train = train
def concretize(self, x, A, sign, aux):
assert self.model is not None
x_rep, mask, can_be_replaced = aux
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
dim_out = A.shape[1]
max_num_cand = x_rep.shape[2]
mask_rep = torch.tensor(can_be_replaced, dtype=torch.float32,
device=A.device)
num_pos = int(np.max(np.sum(can_be_replaced, axis=-1)))
update_A = A.shape[-1] > num_pos * dim_word
if update_A:
bias = torch.bmm(A, (x * (1 - mask_rep).unsqueeze(-1)).reshape(
batch_size, -1, 1)).squeeze(-1)
else:
bias = 0.0
A = A.reshape(batch_size, dim_out, -1, dim_word)
A_new, x_new, x_rep_new, mask_new = [], [], [], []
zeros_A = torch.zeros(dim_out, dim_word, device=A.device)
zeros_w = torch.zeros(dim_word, device=A.device)
zeros_rep = torch.zeros(max_num_cand, dim_word, device=A.device)
zeros_mask = torch.zeros(max_num_cand, device=A.device)
for t in range(batch_size):
cnt = 0
for i in range(0, length):
if can_be_replaced[t][i]:
if update_A:
A_new.append(A[t, :, i, :])
x_new.append(x[t][i])
x_rep_new.append(x_rep[t][i])
mask_new.append(mask[t][i])
cnt += 1
if update_A:
A_new += [zeros_A] * (num_pos - cnt)
x_new += [zeros_w] * (num_pos - cnt)
x_rep_new += [zeros_rep] * (num_pos - cnt)
mask_new += [zeros_mask] * (num_pos - cnt)
if update_A:
A = torch.cat(A_new).reshape(batch_size, num_pos, dim_out, dim_word
).transpose(1, 2)
x = torch.cat(x_new).reshape(batch_size, num_pos, dim_word)
x_rep = torch.cat(x_rep_new).reshape(batch_size, num_pos,
max_num_cand, dim_word)
mask = torch.cat(mask_new).reshape(batch_size, num_pos, max_num_cand)
length = num_pos
A = A.reshape(batch_size, A.shape[1], length, -1).transpose(1, 2)
x = x.reshape(batch_size, length, -1, 1)
if sign == 1:
cmp, init = torch.max, -1e+30
else:
cmp, init = torch.min, 1e+30
init_tensor = torch.ones(batch_size, dim_out) * init
dp = [([init_tensor] * (self.budget + 1)) for i in range(0, length + 1)
]
dp[0][0] = torch.zeros(batch_size, dim_out)
A = A.reshape(batch_size * length, A.shape[2], A.shape[3])
Ax = torch.bmm(A, x.reshape(batch_size * length, x.shape[2], x.
shape[3])).reshape(batch_size, length, A.shape[1])
Ax_rep = torch.bmm(A, x_rep.reshape(batch_size * length,
max_num_cand, x.shape[2]).transpose(-1, -2)).reshape(batch_size,
length, A.shape[1], max_num_cand)
Ax_rep = Ax_rep * mask.unsqueeze(2) + init * (1 - mask).unsqueeze(2)
Ax_rep_bound = cmp(Ax_rep, dim=-1).values
if self.use_simple and self.train:
return torch.sum(cmp(Ax, Ax_rep_bound), dim=1) + bias
for i in range(1, length + 1):
dp[i][0] = dp[i - 1][0] + Ax[:, i - 1]
for j in range(1, self.budget + 1):
dp[i][j] = cmp(dp[i - 1][j] + Ax[:, i - 1], dp[i - 1][j - 1
] + Ax_rep_bound[:, i - 1])
dp = torch.cat(dp[length], dim=0).reshape(self.budget + 1,
batch_size, dim_out)
return cmp(dp, dim=0).values + bias
def init(self, x, aux=None, forward=False):
tokens, batch = aux
self.tokens = tokens
assert len(x.shape) == 3
batch_size, length, dim_word = x.shape[0], x.shape[1], x.shape[2]
max_pos = 1
        can_be_replaced = np.zeros((batch_size, length), dtype=bool)
self._build_substitution(batch)
for t in range(batch_size):
cnt = 0
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
for i in range(len(tokens[t])):
if tokens[t][i] == '[UNK]' or len(candidates[i]
) == 0 or tokens[t][i] != candidates[i][0]:
continue
for w in candidates[i][1:]:
if w in self.model.vocab:
can_be_replaced[t][i] = True
cnt += 1
break
max_pos = max(max_pos, cnt)
dim = max_pos * dim_word
if forward:
eye = torch.eye(dim_word)
lw = torch.zeros(batch_size, dim, length, dim_word)
lb = torch.zeros_like(x)
word_embeddings = self.model.word_embeddings.weight
vocab = self.model.vocab
x_rep = [[[] for i in range(length)] for t in range(batch_size)]
max_num_cand = 1
for t in range(batch_size):
candidates = batch[t]['candidates']
if tokens[t][0] == '[CLS]':
candidates = [[]] + candidates + [[]]
cnt = 0
for i in range(length):
if can_be_replaced[t][i]:
word_embed = word_embeddings[vocab[tokens[t][i]]]
other_embed = x[t, i] - word_embed
if forward:
lw[t, cnt * dim_word:(cnt + 1) * dim_word, i, :] = eye
lb[t, i, :] = torch.zeros_like(word_embed)
for w in candidates[i][1:]:
if w in self.model.vocab:
x_rep[t][i].append(word_embeddings[self.model.
vocab[w]] + other_embed)
max_num_cand = max(max_num_cand, len(x_rep[t][i]))
cnt += 1
elif forward:
lb[t, i, :] = x[t, i, :]
if forward:
uw, ub = lw, lb
else:
lw = lb = uw = ub = None
zeros = torch.zeros(dim_word, device=x.device)
x_rep_, mask = [], []
for t in range(batch_size):
for i in range(length):
x_rep_ += x_rep[t][i] + [zeros] * (max_num_cand - len(x_rep
[t][i]))
mask += [1] * len(x_rep[t][i]) + [0] * (max_num_cand - len(
x_rep[t][i]))
x_rep_ = torch.cat(x_rep_).reshape(batch_size, length, max_num_cand,
dim_word)
mask = torch.tensor(mask, dtype=torch.float32, device=x.device
).reshape(batch_size, length, max_num_cand)
x_rep_ = x_rep_ * self.eps + x.unsqueeze(2) * (1 - self.eps)
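        # eps linearly interpolates each candidate embedding toward the clean
        # embedding; eps=1 means full synonym substitution.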
inf = 1e+20
lower = torch.min(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * inf, dim=2).values
upper = torch.max(mask.unsqueeze(-1) * x_rep_ + (1 - mask).
unsqueeze(-1) * -inf, dim=2).values
lower = torch.min(lower, x)
upper = torch.max(upper, x)
return LinearBound(lw, lb, uw, ub, lower, upper), x, (x_rep_, mask,
can_be_replaced)
def _build_substitution(self, batch):
for t, example in enumerate(batch):
if 'candidates' not in example or example['candidates'] is None:
candidates = []
tokens = example['sentence'].strip().lower().split(' ')
for i in range(len(tokens)):
_cand = []
if tokens[i] in self.synonym:
for w in self.synonym[tokens[i]]:
if w in self.model.vocab:
_cand.append(w)
if len(_cand) > 0:
_cand = [tokens[i]] + _cand
candidates.append(_cand)
example['candidates'] = candidates
class Interval(tuple):
    def __new__(cls, lb=None, ub=None, ptb=None):
        if ub is None:
            assert isinstance(lb, tuple)
            lb, ub = lb
        return tuple.__new__(cls, (lb, ub))
def __init__(self, lb, ub, ptb=None):
if ptb is None:
self.ptb = None
assert lb is ub
elif not isinstance(ptb, Perturbation):
raise ValueError(
'ptb must be a Perturbation object or None. Got type {}'.
format(type(ptb)))
else:
self.ptb = ptb
def __str__(self):
return '({}, {}) with ptb={}'.format(self[0], self[1], self.ptb)
def __repr__(self):
return 'Interval(lb={}, ub={}, ptb={})'.format(self[0], self[1],
self.ptb)
"""Checking if the other interval is tuple, keep the perturbation."""
@staticmethod
def make_interval(lb, ub, other):
if isinstance(other, Interval):
return Interval(lb, ub, other.ptb)
else:
return lb, ub
"""Given a tuple or Interval object, returns the norm and eps."""
@staticmethod
def get_perturbation(interval):
if isinstance(interval, Interval):
if isinstance(interval.ptb, PerturbationLpNorm):
return interval.ptb.norm, interval.ptb.eps
elif isinstance(interval.ptb, PerturbationSynonym):
return np.inf, 1.0
elif isinstance(interval.ptb, PerturbationL0Norm):
return 0, interval.ptb.eps, interval.ptb.ratio
elif interval.ptb is None:
raise RuntimeError(
'get_perturbation() encountered an interval that is not perturbed.'
)
else:
raise RuntimeError(
'get_perturbation() does not know how to handle {}'.
format(type(interval.ptb)))
else:
return np.inf, np.nan
"""Checking if a Interval or tuple object has perturbation enabled."""
@staticmethod
def is_perturbed(interval):
if isinstance(interval, Interval) and interval.ptb is None:
return False
else:
return True
class Bound(nn.Module):
def __init__(self, input_name, name, ori_name, attr={}, inputs=[],
output_index=0, options={}, device=None):
super().__init__()
self.output_name = []
(self.input_name, self.name, self.ori_name, self.attr, self.inputs,
self.output_index, self.options, self.device) = (input_name,
name, ori_name, attr, inputs, output_index, options, device)
self.fv = None
self.from_input = False
self.bounded = False
self.IBP_rets = None
self.perturbed = False
if options is not None and 'loss_fusion' in options:
self.loss_fusion = options['loss_fusion']
else:
self.loss_fusion = False
"""Check if the i-th input is with perturbation or not."""
def is_input_perturbed(self, i=0):
return self.inputs[i].perturbed
def forward(self, *x):
raise NotImplementedError
def interval_propagate(self, *v):
assert len(v) == 1
h_L, h_U = v[0]
return Interval.make_interval(self.forward(h_L), self.forward(h_U),
v[0])
def bound_forward(self, dim_in, last):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA):
raise NotImplementedError
    def infer_batch_dim(self, batch_size, *x):
        raise NotImplementedError
def broadcast_backward(self, A, x):
shape = x.default_shape
batch_dim = max(self.batch_dim, 0)
if isinstance(A, torch.Tensor):
if x.batch_dim == -1:
shape = torch.Size([A.shape[batch_dim + 1]] + list(shape))
dims = []
cnt_sum = A.ndim - len(shape) - 1
for i in range(1, A.ndim):
if i != self.batch_dim + 1 and cnt_sum > 0:
dims.append(i)
cnt_sum -= 1
if dims:
A = torch.sum(A, dim=dims)
else:
dims = list(range(1, 1 + A.ndim - 1 - len(shape)))
if dims:
A = torch.sum(A, dim=dims)
dims = []
for i in range(len(shape)):
if shape[i] == 1 and A.shape[i + 1] != 1:
dims.append(i + 1)
if dims:
A = torch.sum(A, dim=dims, keepdim=True)
assert A.shape[1:] == shape
elif type(A) == Patches:
pass
return A
@staticmethod
def broadcast_forward(dim_in, x, shape_res):
lw, lb, uw, ub = x.lw, x.lb, x.uw, x.ub
shape_x, shape_res = list(x.lb.shape), list(shape_res)
if lw is None:
lw = uw = torch.zeros(dim_in, *shape_x, device=lb.device)
has_batch_size = False
else:
has_batch_size = True
while len(shape_x) < len(shape_res):
if not has_batch_size:
lw, uw = lw.unsqueeze(0), uw.unsqueeze(0)
lb, ub = lb.unsqueeze(0), ub.unsqueeze(0)
shape_x = [1] + shape_x
has_batch_size = True
else:
lw, uw = lw.unsqueeze(2), uw.unsqueeze(2)
lb, ub = lb.unsqueeze(1), ub.unsqueeze(1)
shape_x = [shape_x[0], 1] + shape_x[1:]
repeat = [(shape_res[i] // shape_x[i]) for i in range(len(shape_x))]
lb, ub = lb.repeat(*repeat), ub.repeat(*repeat)
repeat = repeat[:1] + [1] + repeat[1:]
lw, uw = lw.repeat(*repeat), uw.repeat(*repeat)
return lw, lb, uw, ub
def get_bias(self, A, bias):
if A is None:
return 0
assert not isnan(A)
assert not isnan(bias)
if isinstance(A, torch.Tensor):
if torch.norm(A, p=1) < epsilon:
return 0
output_dim = A.shape[0]
if self.batch_dim != -1:
batch_size = A.shape[self.batch_dim + 1]
A_shape = [A.shape[0], np.prod(A.shape[1:self.batch_dim + 1
]).astype(np.int32), batch_size, np.prod(A.shape[self.
batch_dim + 2:]).astype(np.int32)]
A = A.reshape(*A_shape).permute(2, 0, 1, 3).reshape(batch_size,
output_dim, -1)
bias = bias.reshape(*A_shape[1:]).transpose(0, 1).reshape(
batch_size, -1, 1)
bias_new = A.matmul(bias).squeeze(-1).transpose(0, 1)
else:
batch_size = A.shape[1]
A = A.view(output_dim, batch_size, -1)
bias_new = A.matmul(bias.view(-1))
if isnan(bias_new):
return 0
else:
return bias_new
elif type(A) == Patches:
if torch.norm(A.patches, p=1) < epsilon:
return 0
if self.batch_dim != -1:
batch_size = bias.shape[0]
bias = F.unfold(bias, kernel_size=A.patches.size(-1),
stride=A.stride, padding=A.padding).transpose(-2, -1
).unsqueeze(-2)
bias.size(1)
patches = A.patches.view(A.patches.size(0), A.patches.size(
1), A.patches.size(-4), A.patches.size(-1) * A.patches.
size(-2) * A.patches.size(-3))
prod = bias * patches
bias_new = prod.sum(-1).transpose(-2, -1)
bias_new = bias_new.view(batch_size, bias_new.size(-2), int
(math.sqrt(bias_new.size(-1))), int(math.sqrt(bias_new.
size(-1))))
else:
patches = A.patches
patches_reshape = torch.sum(patches, dim=(-1, -2, -3)) * bias
patches_reshape = patches_reshape.transpose(-1, -2)
return patches_reshape.view(patches_reshape.size(0),
patches_reshape.size(1), int(math.sqrt(patches_reshape.
size(2))), -1).transpose(0, 1)
return bias_new
        else:
            raise NotImplementedError()
class BoundActivation(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
self.nonlinear = True
self.relaxed = False
def _init_linear(self, x):
self.mask_pos = torch.gt(x.lower, 0)
self.mask_neg = torch.lt(x.upper, 0)
        # `1 - bool_tensor` raises on modern PyTorch; build the mask with logical ops instead.
        self.mask_both = ~(self.mask_pos | self.mask_neg)
self.lw = torch.zeros(x.lower.shape, device=self.device)
self.lb = self.lw.clone()
self.uw = self.lw.clone()
self.ub = self.lw.clone()
def _add_linear(self, mask, type, k, x0, y0):
if mask is None:
mask = 1
if type == 'lower':
w_out, b_out = self.lw, self.lb
else:
w_out, b_out = self.uw, self.ub
w_out += mask * k
b_out += mask * (-x0 * k + y0)
def bound_relax(self, x):
raise NotImplementedError
def bound_backward(self, last_lA, last_uA, x):
if not self.relaxed:
self._init_linear(x)
self.bound_relax(x)
def _bound_oneside(last_A, sign=-1):
if last_A is None:
return None, 0
if self.batch_dim == 0:
if sign == -1:
_A = last_A.clamp(min=0) * self.lw.unsqueeze(0
) + last_A.clamp(max=0) * self.uw.unsqueeze(0)
_bias = last_A.clamp(min=0) * self.lb.unsqueeze(0
) + last_A.clamp(max=0) * self.ub.unsqueeze(0)
elif sign == 1:
_A = last_A.clamp(min=0) * self.uw.unsqueeze(0
) + last_A.clamp(max=0) * self.lw.unsqueeze(0)
_bias = last_A.clamp(min=0) * self.ub.unsqueeze(0
) + last_A.clamp(max=0) * self.lb.unsqueeze(0)
while _bias.ndim > 2:
_bias = torch.sum(_bias, dim=-1)
elif self.batch_dim == -1:
                # Cast the boolean mask so that (1 - mask) below is valid tensor arithmetic.
                mask = torch.gt(last_A, 0.0).to(last_A.dtype)
if sign == -1:
_A = last_A * (mask * self.lw.unsqueeze(0).unsqueeze(1) +
(1 - mask) * self.uw.unsqueeze(0).unsqueeze(1))
_bias = last_A * (mask * self.lb.unsqueeze(0).unsqueeze
(1) + (1 - mask) * self.ub.unsqueeze(0).unsqueeze(1))
elif sign == 1:
_A = last_A * (mask * self.uw.unsqueeze(0).unsqueeze(1) +
(1 - mask) * self.lw.unsqueeze(0).unsqueeze(1))
_bias = last_A * (mask * self.ub.unsqueeze(0).unsqueeze
(1) + (1 - mask) * self.lb.unsqueeze(0).unsqueeze(1))
while _bias.ndim > 2:
_bias = torch.sum(_bias, dim=-1)
else:
raise NotImplementedError
return _A, _bias
lA, lbias = _bound_oneside(last_lA, sign=-1)
uA, ubias = _bound_oneside(last_uA, sign=+1)
return [(lA, uA)], lbias, ubias
def bound_forward(self, dim_in, x):
if not self.relaxed:
self._init_linear(x)
self.bound_relax(x)
if self.lw.ndim > 0:
if x.lw is not None:
lw = self.lw.unsqueeze(1).clamp(min=0
) * x.lw + self.lw.unsqueeze(1).clamp(max=0) * x.uw
uw = self.uw.unsqueeze(1).clamp(max=0
) * x.lw + self.uw.unsqueeze(1).clamp(min=0) * x.uw
else:
lw = uw = None
elif x.lw is not None:
lw = self.lw.unsqueeze(0).clamp(min=0) * x.lw + self.lw.unsqueeze(0
).clamp(max=0) * x.uw
uw = self.uw.unsqueeze(0).clamp(min=0) * x.lw + self.uw.unsqueeze(0
).clamp(max=0) * x.uw
else:
lw = uw = None
lb = self.lw.clamp(min=0) * x.lb + self.lw.clamp(max=0
) * x.ub + self.lb
ub = self.uw.clamp(max=0) * x.lb + self.uw.clamp(min=0
) * x.ub + self.ub
return LinearBound(lw, lb, uw, ub)
def infer_batch_dim(self, batch_size, *x):
return x[0]
class BoundReciprocalNew(BoundActivation):
def __init__(self, input_name, name, ori_name, attr, inputs,
output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs,
output_index, options, device)
self.nonlinear = True
def bound_relax(self, x):
m = (x.lower + x.upper) / 2
kl = -1 / m.pow(2)
self._add_linear(mask=None, type='lower', k=kl, x0=m, y0=1.0 / m)
ku = -1.0 / (x.lower * x.upper)
self._add_linear(mask=None, type='upper', k=ku, x0=x.lower, y0=1.0 /
x.lower)
def interval_propagate(self, *v):
h_L, h_U = v[0]
return torch.reciprocal(h_U.float()), torch.reciprocal(h_L.float())
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| mnmueller/auto_LiRPA | BoundReciprocal | false | 7,287 | [
"BSD-3-Clause"
] | 1 | 55cb270b0b99f07b74541d55706c69fbb9daff66 | https://github.com/mnmueller/auto_LiRPA/tree/55cb270b0b99f07b74541d55706c69fbb9daff66 | from _paritybench_helpers import _mock_config
import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import MSELoss
def isnan(x):
if isinstance(x, Patches):
return False
return torch.isnan(x).any()
class Perturbation:
def __init__(self):
pass
def set_eps(self, eps):
self.eps = eps
def concretize(self, x, A, sign=-1, aux=None):
raise NotImplementedError
def init(self, x, aux=None, forward=False):
raise NotImplementedError
class PerturbationL0Norm(Perturbation):
def __init__(self, eps, x_L=None, x_U=None, ratio=1.0):
self.eps = eps
self.x_U = x_U
self.x_L = x_L
self.ratio = ratio
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
eps = math.ceil(self.eps)
x = x.reshape(x.shape[0], -1, 1)
center = A.matmul(x)
x = x.reshape(x.shape[0], 1, -1)
original = A * x.expand(x.shape[0], A.shape[-2], x.shape[2])
neg_mask = A < 0
pos_mask = A >= 0
if sign == 1:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = A[pos_mask] - original[pos_mask]
A_diff[neg_mask] = -original[neg_mask]
else:
A_diff = torch.zeros_like(A)
A_diff[pos_mask] = original[pos_mask]
A_diff[neg_mask] = original[neg_mask] - A[neg_mask]
A_diff, _ = torch.sort(A_diff, dim=2, descending=True)
bound = center + sign * A_diff[:, :, :eps].sum(dim=2).unsqueeze(2
) * self.ratio
return bound.squeeze(2)
def init(self, x, aux=None, forward=False):
x_L = x
x_U = x
if not forward:
return LinearBound(None, None, None, None, x_L, x_U), x, None
batch_size = x.shape[0]
dim = x.reshape(batch_size, -1).shape[-1]
eye = torch.eye(dim).unsqueeze(0).repeat(batch_size, 1, 1)
lw = eye.reshape(batch_size, dim, *x.shape[1:])
lb = torch.zeros_like(x)
uw, ub = lw.clone(), lb.clone()
return LinearBound(lw, lb, uw, ub, x_L, x_U), x, None
def __repr__(self):
return 'PerturbationLpNorm(norm=0, eps={})'.format(self.eps)
class PerturbationLpNorm(Perturbation):
def __init__(self, eps, norm=np.inf, x_L=None, x_U=None):
self.eps = eps
self.norm = norm
self.dual_norm = 1 if norm == np.inf else np.float64(1.0) / (1 -
1.0 / self.norm)
self.x_L = x_L
self.x_U = x_U
"""Given an variable x and its bound matrix A, compute worst case bound according to Lp norm."""
def concretize(self, x, A, sign=-1, aux=None):
if A is None:
return None
def concretize_matrix(A):
nonlocal x
if not isinstance(A, eyeC):
A = A.reshape(A.shape[0], A.shape[1], -1)
if self.norm == np.inf:
x_L = x - self.eps if self.x_L is None else self.x_L
x_U = x + self.eps if self.x_U is None else self.x_U
x_ub = x_U.reshape(x_U.shape[0], -1, 1)
x_lb = x_L.reshape(x_L.shape[0], -1, 1)
center = (x_ub + x_lb) / 2.0
diff = (x_ub - x_lb) / 2.0
if not isinstance(A, eyeC):
bound = A.matmul(center) + sign * A.abs().matmul(diff)
else:
bound = center + sign * diff
else:
x = x.reshape(x.shape[0], -1, 1)
if not isinstance(A, eyeC):
deviation = A.norm(self.dual_norm, -1) * self.eps
bound = A.matmul(x) + sign * deviation.unsqueeze(-1)
else:
bound = x + sign * self.eps
bound = bound.squeeze(-1)
return bound
def concretize_patches(A):
nonlocal x
if self.norm == np.inf:
x_L = x -
# ... truncated (>4000 chars) for memory efficiency |
WeightL1Loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/u7/cu7vsiayqefb3g3w7t2akur6uz432klzvqwftk25bgwdpcb3zben.py
# Topologically Sorted Source Nodes: [sub, diff, sum_1, diff_1, loss, sum_2, div], Original ATen: [aten.sub, aten.abs, aten.sum, aten.view, aten.mul, aten.div]
# Source node to ATen node mapping:
# diff => abs_1
# diff_1 => view_1
# div => div
# loss => mul
# sub => sub
# sum_1 => sum_1
# sum_2 => sum_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_1, [1]), kwargs = {})
# %view_1 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%sum_1, [4, -1, 4, 4]), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %arg2_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 4), kwargs = {})
triton_per_fused_abs_div_mul_sub_sum_view_0 = async_compile.triton('triton_per_fused_abs_div_mul_sub_sum_view_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_div_mul_sub_sum_view_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 9, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_div_mul_sub_sum_view_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 16
r2 = (rindex // 64)
r4 = rindex % 64
r3 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r4), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (64 + r4), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (128 + r4), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (192 + r4), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr2 + (r3), None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp6 = tmp4 - tmp5
tmp7 = tl_math.abs(tmp6)
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tl_math.abs(tmp11)
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tl_math.abs(tmp16)
tmp18 = tmp13 + tmp17
tmp20 = tmp18 * tmp19
tmp21 = tl.broadcast_to(tmp20, [RBLOCK])
tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0))
tmp24 = 0.25
tmp25 = tmp23 * tmp24
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp25, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sub, diff, sum_1, diff_1, loss, sum_2, div], Original ATen: [aten.sub, aten.abs, aten.sum, aten.view, aten.mul, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_abs_div_mul_sub_sum_view_0.run(buf2, arg0_1, arg1_1, arg2_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class WeightL1Loss(nn.Module):
def __init__(self):
super(WeightL1Loss, self).__init__()
def forward(self, pred_loc, label_loc, loss_weight):
b, _, sh, sw = pred_loc.size()
pred_loc = pred_loc.view(b, 4, -1, sh, sw)
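        # View predictions as (batch, 4 box offsets, anchors, h, w) so the L1
        # error can be summed over the 4 coordinate channels below.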
diff = (pred_loc - label_loc).abs()
diff = diff.sum(dim=1).view(b, -1, sh, sw)
loss = diff * loss_weight
return loss.sum().div(b)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_div_mul_sub_sum_view_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 16
r2 = rindex // 64
r4 = rindex % 64
r3 = rindex
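    # The four load/abs/add groups below unroll the sum over the 4 box-offset
    # channels of |pred - label|; tmp19 is the per-location loss weight.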
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp1 = tl.load(in_ptr1 + r4, None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr1 + (64 + r4), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr1 + (128 + r4), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr1 + (192 + r4), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr2 + r3, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp6 = tmp4 - tmp5
tmp7 = tl_math.abs(tmp6)
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tl_math.abs(tmp11)
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tl_math.abs(tmp16)
tmp18 = tmp13 + tmp17
tmp20 = tmp18 * tmp19
tmp21 = tl.broadcast_to(tmp20, [RBLOCK])
tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0))
tmp24 = 0.25
tmp25 = tmp23 * tmp24
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp25, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_abs_div_mul_sub_sum_view_0[grid(1)](buf2, arg0_1,
arg1_1, arg2_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class WeightL1LossNew(nn.Module):
def __init__(self):
super(WeightL1LossNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| mshmoon/siamrpn-lightweight | WeightL1Loss | false | 7,288 | [
"MIT"
] | 1 | f6527e34c9eaaeb45817b12babd78ee73b1c7525 | https://github.com/mshmoon/siamrpn-lightweight/tree/f6527e34c9eaaeb45817b12babd78ee73b1c7525 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, pred_loc, label_loc, loss_weight):
b, _, sh, sw = pred_loc.size()
pred_loc = pred_loc.view(b, 4, -1, sh, sw)
diff = (pred_loc - label_loc).abs()
diff = diff.sum(dim=1).view(b, -1, sh, sw)
loss = diff * loss_weight
return loss.sum().div(b)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return []
|
Corr | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ni/cnivqy5as35um6234jqmngvb6hqujnfg2rfa3oaeqyy4coozutwo.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view, %view_1, None, [1, 1], [0, 0], [1, 1], False, [0, 0], 16), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (x1 + (16*y0)), xmask & ymask)
tl.store(out_ptr0 + (y0 + (16*x1)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 16, 4, 4), (256, 1, 64, 16), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(arg1_1, buf0, 16, 16, grid=grid(16, 16), stream=stream0)
del arg1_1
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, reinterpret_tensor(arg0_1, (16, 1, 4, 4), (16, 16, 4, 1), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=16, bias=None)
assert_size_stride(buf1, (1, 16, 1, 1), (16, 1, 16, 16))
del arg0_1
del buf0
return (reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Corr(nn.Module):
def __init__(self):
super(Corr, self).__init__()
def forward(self, x, kernel):
batch = kernel.size(0)
channel = kernel.size(1)
x = x.view(1, batch * channel, x.size(2), x.size(3))
kernel = kernel.view(batch * channel, 1, kernel.size(2), kernel.size(3)
)
out = F.conv2d(x, kernel, groups=batch * channel)
out = out.view(batch, channel, out.size(2), out.size(3))
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
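# Editorial sketch (not part of the original record; names are editorial):
# the groups=batch*channel trick above is equivalent to correlating each
# (batch, channel) feature map with its own kernel. A naive loop makes that
# explicit and is handy for verifying the fused path on small inputs.
def corr_reference(x, kernel):
    b, c = kernel.size(0), kernel.size(1)
    maps = [F.conv2d(x[i, j][None, None], kernel[i, j][None, None])
            for i in range(b) for j in range(c)]
    return torch.cat(maps).view(b, c, maps[0].size(2), maps[0].size(3))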
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (x1 + 16 * y0), xmask & ymask)
tl.store(out_ptr0 + (y0 + 16 * x1), tmp0, xmask & ymask)
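# Editorial annotation (not part of the generated kernel): this is a plain
# 16x16 transpose that repacks the 16 feature maps of x from NCHW into the
# channels-last buffer consumed by the grouped convolution below; the
# convolution itself is dispatched to extern_kernels.convolution rather
# than generated in Triton.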
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 16, 4, 4), (256, 1, 64, 16), torch.
float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 16)](arg1_1, buf0, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del arg1_1
buf1 = extern_kernels.convolution(buf0, reinterpret_tensor(arg0_1,
(16, 1, 4, 4), (16, 16, 4, 1), 0), stride=(1, 1), padding=(0, 0
), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=16, bias=None)
assert_size_stride(buf1, (1, 16, 1, 1), (16, 1, 16, 16))
del arg0_1
del buf0
return reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0),
class CorrNew(nn.Module):
def __init__(self):
super(CorrNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| mshmoon/siamrpn-lightweight | Corr | false | 7,289 | [
"MIT"
] | 1 | f6527e34c9eaaeb45817b12babd78ee73b1c7525 | https://github.com/mshmoon/siamrpn-lightweight/tree/f6527e34c9eaaeb45817b12babd78ee73b1c7525 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, kernel):
batch = kernel.size(0)
channel = kernel.size(1)
x = x.view(1, batch * channel, x.size(2), x.size(3))
kernel = kernel.view(batch * channel, 1, kernel.size(2), kernel.size(3)
)
out = F.conv2d(x, kernel, groups=batch * channel)
out = out.view(batch, channel, out.size(2), out.size(3))
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
BernoulliLogProb | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/na/cnacquvk53qgxzozitnbm3dsdswtqqnwwktg7ckxobsyjyvyz2rd.py
# Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, neg], Original ATen: [aten.binary_cross_entropy_with_logits, aten.neg]
# Source node to ATen node mapping:
# binary_cross_entropy_with_logits => abs_1, exp, full_default, log1p, minimum, mul, neg, sub, sub_1, sub_2
# neg => neg_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sub_2,), kwargs = {})
triton_poi_fused_binary_cross_entropy_with_logits_neg_0 = async_compile.triton('triton_poi_fused_binary_cross_entropy_with_logits_neg_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_binary_cross_entropy_with_logits_neg_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_binary_cross_entropy_with_logits_neg_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = -tmp12
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, neg], Original ATen: [aten.binary_cross_entropy_with_logits, aten.neg]
stream0 = get_raw_stream(0)
triton_poi_fused_binary_cross_entropy_with_logits_neg_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils
import torch.utils.data
class BernoulliLogProb(nn.Module):
def __init__(self):
super().__init__()
self.bce_with_logits = nn.BCEWithLogitsLoss(reduction='none')
def forward(self, logits, target):
return -self.bce_with_logits(logits, target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
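# Editorial cross-check (a sketch, not part of the original record):
# negating BCEWithLogitsLoss recovers the Bernoulli log-probability, so for
# binary targets the module matches torch.distributions elementwise.
def _check_bernoulli_log_prob():
    logits = torch.randn(4, 4)
    target = torch.randint(0, 2, (4, 4)).float()
    expected = torch.distributions.Bernoulli(logits=logits).log_prob(target)
    assert torch.allclose(BernoulliLogProb()(logits, target), expected)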
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_binary_cross_entropy_with_logits_neg_0(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = -tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
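# Editorial annotation (not part of the generated kernel): tmp6 - tmp10 is
# the numerically stable log-sigmoid, min(0, x) - log1p(exp(-|x|)), so tmp13
# reduces to target * logits - softplus(logits), i.e. the elementwise
# Bernoulli log-probability returned by the module.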
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_binary_cross_entropy_with_logits_neg_0[grid(256)](
arg0_1, arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class BernoulliLogProbNew(nn.Module):
def __init__(self):
super().__init__()
self.bce_with_logits = nn.BCEWithLogitsLoss(reduction='none')
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| msunardi/vae_experiment | BernoulliLogProb | false | 7,290 | [
"MIT"
] | 1 | e3ce39e586f1189d157e753370a90c07713658b3 | https://github.com/msunardi/vae_experiment/tree/e3ce39e586f1189d157e753370a90c07713658b3 | import torch
import torch.nn as nn
import torch.utils
import torch.utils.data
class Model(nn.Module):
def __init__(self):
super().__init__()
self.bce_with_logits = nn.BCEWithLogitsLoss(reduction='none')
def forward(self, logits, target):
return -self.bce_with_logits(logits, target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
LogSoftMax | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/z5/cz5f25pcqiiedfuqxbk2cnpgag5ontaxdajgnly6msqpsesz47x3.py
# Topologically Sorted Source Nodes: [cls_1, cls_2], Original ATen: [aten.clone, aten._log_softmax]
# Source node to ATen node mapping:
# cls_1 => clone
# cls_2 => amax, exp, log, sub, sub_1, sum_1
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [4], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [4], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_clone_0 = async_compile.triton('triton_poi_fused__log_softmax_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 2], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 128
xnumel = 2
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 32
y1 = (yindex // 32)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (32*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (y0 + (64*y1)), ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (32 + y0 + (64*y1)), ymask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tmp1 - tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp2 - tmp3
tmp8 = tl_math.exp(tmp7)
tmp9 = tmp6 + tmp8
tmp10 = tl_math.log(tmp9)
tmp11 = tmp4 - tmp10
tl.store(out_ptr0 + (x2 + (2*y3)), tmp11, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4, 2), (64, 32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [cls_1, cls_2], Original ATen: [aten.clone, aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_clone_0.run(arg0_1, buf0, 128, 2, grid=grid(128, 2), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class LogSoftMax(nn.Module):
def __init__(self):
super(LogSoftMax, self).__init__()
def forward(self, cls):
b, a2, h, w = cls.size()
cls = cls.view(b, 2, a2 // 2, h, w)
cls = cls.permute(0, 2, 3, 4, 1).contiguous()
cls = F.log_softmax(cls, dim=4)
return cls
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
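# Editorial sketch (not part of the original record): exponentiating the
# output recovers per-anchor class probabilities over the trailing
# background/foreground axis; pairs along that last axis sum to 1.
def cls_probabilities(cls):
    return LogSoftMax()(cls).exp()  # shape (b, a2 // 2, h, w, 2)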
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_clone_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 2
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 32
y1 = yindex // 32
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 32 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (y0 + 64 * y1), ymask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (32 + y0 + 64 * y1), ymask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tmp1 - tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp2 - tmp3
tmp8 = tl_math.exp(tmp7)
tmp9 = tmp6 + tmp8
tmp10 = tl_math.log(tmp9)
tmp11 = tmp4 - tmp10
tl.store(out_ptr0 + (x2 + 2 * y3), tmp11, xmask & ymask)
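# Editorial annotation (not part of the generated kernel): the class axis
# has length 2, so the running max and log-sum-exp need only the two loads
# tmp1/tmp2, and the store at x2 + 2 * y3 writes straight into the permuted
# (b, k, h, w, 2) buffer, fusing permute().contiguous() with log_softmax.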
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4, 2), (64, 32, 8, 2, 1), torch
.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_clone_0[grid(128, 2)](arg0_1, buf0,
128, 2, XBLOCK=2, YBLOCK=64, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class LogSoftMaxNew(nn.Module):
def __init__(self):
super(LogSoftMaxNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| mshmoon/siamrpn-lightweight | LogSoftMax | false | 7,291 | [
"MIT"
] | 1 | f6527e34c9eaaeb45817b12babd78ee73b1c7525 | https://github.com/mshmoon/siamrpn-lightweight/tree/f6527e34c9eaaeb45817b12babd78ee73b1c7525 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, cls):
b, a2, h, w = cls.size()
cls = cls.view(b, 2, a2 // 2, h, w)
cls = cls.permute(0, 2, 3, 4, 1).contiguous()
cls = F.log_softmax(cls, dim=4)
return cls
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
DistillLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/mc/cmc44gqwlbgitm3uqkuiwz6fe3jirwculg7zmyndeuqzyyqzyok7.py
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax_1 => exp_1
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 4), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hg/chgnua7xriwlaya5uhb6ovx4n7dh6g35t7drug4ivsx7pil4obce.py
# Topologically Sorted Source Nodes: [softmax, hard_target_loss], Original ATen: [aten._softmax, aten._log_softmax]
# Source node to ATen node mapping:
# hard_target_loss => amax_2, sub_3
# softmax => exp
# Graph fragment:
# %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 1), kwargs = {})
# %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [1], True), kwargs = {})
# %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {})
# %div_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 4), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {})
# %amax_2 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {})
# %sub_3 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax_2), kwargs = {})
triton_poi_fused__log_softmax__softmax_1 = async_compile.triton('triton_poi_fused__log_softmax__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax__softmax_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = triton_helpers.maximum(tmp3, tmp5)
tmp19 = triton_helpers.maximum(tmp18, tmp8)
tmp20 = triton_helpers.maximum(tmp19, tmp11)
tmp21 = tmp0 - tmp20
tl.store(out_ptr0 + (x3), tmp17, xmask)
tl.store(out_ptr1 + (x3), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/fe/cfewtwrwn2vcoeia6efsrmfaqf6spm7ezczk3g6lc4ndbmjov6wy.py
# Topologically Sorted Source Nodes: [softmax_1, kl_div, softmax, soft_target_loss, mul_1, hard_target_loss, mul_2, total_loss], Original ATen: [aten._softmax, aten.xlogy, aten.mul, aten.sub, aten.sum, aten.div, aten._log_softmax, aten.neg, aten.add]
# Source node to ATen node mapping:
# hard_target_loss => div_5, exp_2, log_1, mul_3, neg, sub_4, sum_4, sum_5
# kl_div => div_4, eq, full_default, full_default_1, isnan, log, mul, mul_1, sub_2, sum_3, where, where_1
# mul_1 => mul_4
# mul_2 => mul_5
# soft_target_loss => mul_2
# softmax => div_1, sum_1
# softmax_1 => div_3, sum_2
# total_loss => add
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %div_3 : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {})
# %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%div_3,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%div_3, 0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div_3,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_3, %log), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul_1), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default_1, %where), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_3, %div_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mul), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_2,), kwargs = {})
# %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, 4), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_4, 16), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, 4), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [1], True), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_4,), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_3, %log_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %arg2_1), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_3,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_5,), kwargs = {})
# %div_5 : [num_users=2] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg, 64), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_5, -3), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %mul_5), kwargs = {})
triton_per_fused__log_softmax__softmax_add_div_mul_neg_sub_sum_xlogy_2 = async_compile.triton('triton_per_fused__log_softmax__softmax_add_div_mul_neg_sub_sum_xlogy_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {7: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 8), equal_to_1=(7,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax__softmax_add_div_mul_neg_sub_sum_xlogy_2', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': True, 'num_load': 16, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax__softmax_add_div_mul_neg_sub_sum_xlogy_2(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r3), None)
tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (r3), None)
tmp19 = tl.load(in_ptr2 + (r3), None)
tmp20 = tl.load(in_ptr2 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr2 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr2 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr3 + (r3), None)
tmp37 = tl.load(in_ptr3 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp38 = tl.load(in_ptr3 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr3 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp42 = tl.load(in_ptr3 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp26 = tmp24 + tmp25
tmp27 = tmp19 / tmp26
tmp28 = libdevice.isnan(tmp27).to(tl.int1)
tmp29 = 0.0
tmp30 = tmp27 == tmp29
tmp31 = tl_math.log(tmp27)
tmp32 = tmp27 * tmp31
tmp33 = tl.where(tmp30, tmp29, tmp32)
tmp34 = float("nan")
tmp35 = tl.where(tmp28, tmp34, tmp33)
tmp39 = tmp37 + tmp38
tmp41 = tmp39 + tmp40
tmp43 = tmp41 + tmp42
tmp44 = tmp36 / tmp43
tmp45 = tmp27 * tmp44
tmp46 = tmp35 - tmp45
tmp47 = tl.broadcast_to(tmp46, [RBLOCK])
tmp49 = triton_helpers.promote_to_tensor(tl.sum(tmp47, 0))
tmp50 = 0.25
tmp51 = tmp49 * tmp50
tmp52 = 16.0
tmp53 = tmp51 * tmp52
tmp54 = -tmp18
tmp55 = 0.015625
tmp56 = tmp54 * tmp55
tmp57 = 4.0
tmp58 = tmp53 * tmp57
tmp59 = -3.0
tmp60 = tmp56 * tmp59
tmp61 = tmp58 + tmp60
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp53, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + (tl.full([1], 0, tl.int32)), tmp56, None)
tl.store(out_ptr1 + (tl.full([1], 0, tl.int32)), tmp61, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax, hard_target_loss], Original ATen: [aten._softmax, aten._log_softmax]
triton_poi_fused__log_softmax__softmax_1.run(arg1_1, buf2, buf5, 256, grid=grid(256), stream=stream0)
del arg1_1
buf6 = empty_strided_cuda((), (), torch.float32)
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3; del buf3 # reuse
buf7 = buf6; del buf6 # reuse
buf8 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [softmax_1, kl_div, softmax, soft_target_loss, mul_1, hard_target_loss, mul_2, total_loss], Original ATen: [aten._softmax, aten.xlogy, aten.mul, aten.sub, aten.sum, aten.div, aten._log_softmax, aten.neg, aten.add]
triton_per_fused__log_softmax__softmax_add_div_mul_neg_sub_sum_xlogy_2.run(buf4, buf7, buf5, arg2_1, buf0, buf2, buf8, 1, 256, grid=grid(1), stream=stream0)
del arg2_1
del buf0
del buf2
del buf5
return (buf4, buf7, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class DistillLoss(nn.Module):
def __init__(self, temperature, distillation_weight):
super().__init__()
self.temperature = temperature
self.distillation_weight = distillation_weight
self.kldiv = nn.KLDivLoss(reduction='batchmean')
def forward(self, outputs, labels, outputs_teacher):
"""Compute distillation loss given outputs, labels, and outputs of teacher model
Arguments:
outputs {[type]} -- [description]
labels {[type]} -- [description]
output_teacher {[type]} -- [description]
"""
soft_target_loss = 0
if outputs_teacher is not None and self.distillation_weight > 0:
soft_target_loss = self.kldiv(F.softmax(outputs / self.
temperature, dim=1), F.softmax(outputs_teacher / self.
temperature, dim=1)) * self.temperature ** 2
hard_target_loss = F.cross_entropy(outputs, labels, reduction='mean')
total_loss = (soft_target_loss * self.distillation_weight +
hard_target_loss * (1 - self.distillation_weight))
return soft_target_loss, hard_target_loss, total_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'temperature': 4, 'distillation_weight': 4}]
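# Editorial note (an assumption-flagged sketch, not part of the original
# record): nn.KLDivLoss expects its first argument in log-space, so the
# textbook Hinton-style soft-target term is usually written with
# F.log_softmax on the student side. The record above passes probabilities
# instead, and the compiled kernel below reproduces that faithfully.
def soft_target_reference(outputs, outputs_teacher, T):
    return F.kl_div(F.log_softmax(outputs / T, dim=1),
                    F.softmax(outputs_teacher / T, dim=1),
                    reduction='batchmean') * T ** 2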
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused__log_softmax__softmax_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = triton_helpers.maximum(tmp3, tmp5)
tmp19 = triton_helpers.maximum(tmp18, tmp8)
tmp20 = triton_helpers.maximum(tmp19, tmp11)
tmp21 = tmp0 - tmp20
tl.store(out_ptr0 + x3, tmp17, xmask)
tl.store(out_ptr1 + x3, tmp21, xmask)
@triton.jit
def triton_per_fused__log_softmax__softmax_add_div_mul_neg_sub_sum_xlogy_2(
in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + r3, None)
tmp19 = tl.load(in_ptr2 + r3, None)
tmp20 = tl.load(in_ptr2 + (r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr2 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr2 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr2 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp36 = tl.load(in_ptr3 + r3, None)
tmp37 = tl.load(in_ptr3 + (r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp38 = tl.load(in_ptr3 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp40 = tl.load(in_ptr3 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp42 = tl.load(in_ptr3 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp26 = tmp24 + tmp25
tmp27 = tmp19 / tmp26
tmp28 = libdevice.isnan(tmp27).to(tl.int1)
tmp29 = 0.0
tmp30 = tmp27 == tmp29
tmp31 = tl_math.log(tmp27)
tmp32 = tmp27 * tmp31
tmp33 = tl.where(tmp30, tmp29, tmp32)
tmp34 = float('nan')
tmp35 = tl.where(tmp28, tmp34, tmp33)
tmp39 = tmp37 + tmp38
tmp41 = tmp39 + tmp40
tmp43 = tmp41 + tmp42
tmp44 = tmp36 / tmp43
tmp45 = tmp27 * tmp44
tmp46 = tmp35 - tmp45
tmp47 = tl.broadcast_to(tmp46, [RBLOCK])
tmp49 = triton_helpers.promote_to_tensor(tl.sum(tmp47, 0))
tmp50 = 0.25
tmp51 = tmp49 * tmp50
tmp52 = 16.0
tmp53 = tmp51 * tmp52
tmp54 = -tmp18
tmp55 = 0.015625
tmp56 = tmp54 * tmp55
tmp57 = 4.0
tmp58 = tmp53 * tmp57
tmp59 = -3.0
tmp60 = tmp56 * tmp59
tmp61 = tmp58 + tmp60
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp53, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp56, None)
tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp61, None)
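# Editorial annotation (not part of the generated kernel): the literals bake
# in the init arguments -- 0.25 appears both as 1/temperature in the scaled
# softmax and as the batchmean 1/batch, 16.0 is temperature**2, 0.015625 is
# 1/(4*4*4) for the mean cross-entropy, and 4.0 / -3.0 are
# distillation_weight and (1 - distillation_weight).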
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax__softmax_1[grid(256)](arg1_1, buf2,
buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf6 = empty_strided_cuda((), (), torch.float32)
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
buf7 = buf6
del buf6
buf8 = empty_strided_cuda((), (), torch.float32)
triton_per_fused__log_softmax__softmax_add_div_mul_neg_sub_sum_xlogy_2[
grid(1)](buf4, buf7, buf5, arg2_1, buf0, buf2, buf8, 1, 256,
num_warps=2, num_stages=1)
del arg2_1
del buf0
del buf2
del buf5
return buf4, buf7, buf8
class DistillLossNew(nn.Module):
def __init__(self, temperature, distillation_weight):
super().__init__()
self.temperature = temperature
self.distillation_weight = distillation_weight
self.kldiv = nn.KLDivLoss(reduction='batchmean')
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1], output[2]
| mrtunguyen/knowledge_distillation | DistillLoss | false | 7,292 | [
"MIT"
] | 1 | dd114e980dbebda6cc247f658eb801ab948ee6ba | https://github.com/mrtunguyen/knowledge_distillation/tree/dd114e980dbebda6cc247f658eb801ab948ee6ba | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, temperature, distillation_weight):
super().__init__()
self.temperature = temperature
self.distillation_weight = distillation_weight
self.kldiv = nn.KLDivLoss(reduction='batchmean')
def forward(self, outputs, labels, outputs_teacher):
"""Compute distillation loss given outputs, labels, and outputs of teacher model
Arguments:
outputs {[type]} -- [description]
labels {[type]} -- [description]
output_teacher {[type]} -- [description]
"""
soft_target_loss = 0
if outputs_teacher is not None and self.distillation_weight > 0:
soft_target_loss = self.kldiv(F.softmax(outputs / self.
temperature, dim=1), F.softmax(outputs_teacher / self.
temperature, dim=1)) * self.temperature ** 2
hard_target_loss = F.cross_entropy(outputs, labels, reduction='mean')
total_loss = (soft_target_loss * self.distillation_weight +
hard_target_loss * (1 - self.distillation_weight))
return soft_target_loss, hard_target_loss, total_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
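# Usage sketch (editor addition, not from the repo). Note that
# nn.KLDivLoss expects log-probabilities as its first argument, so the
# soft-target term above only matches the textbook KL if F.log_softmax is
# used for the student; the call as written still runs, but computes a
# different quantity.
if __name__ == '__main__':
    student = torch.randn(8, 10)
    teacher = torch.randn(8, 10)
    labels = torch.randint(0, 10, (8,))
    loss_fn = Model(temperature=4.0, distillation_weight=0.5)
    soft, hard, total = loss_fn(student, labels, teacher)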
|
LinRegModel | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/gv/cgvpra4kwn5idcnpg33dwbcypnrjm2z2np7rzbwqwz3kurrqhisn.py
# Topologically Sorted Source Nodes: [mul, add], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {})
triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp4 = tl.load(in_ptr2 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp3 = tmp1 * tmp2
tmp6 = tmp3 + tmp5
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, add], Original ATen: [aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_0.run(primals_1, primals_2, primals_3, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_3
return (buf0, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LinRegModel(nn.Module):
def __init__(self):
super().__init__()
self.a = nn.Parameter(torch.randn(1))
self.b = nn.Parameter(torch.randn(1))
def forward(self, x):
return self.a * x + self.b
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp3 = tmp1 * tmp2
tmp6 = tmp3 + tmp5
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](primals_1, primals_2,
primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_3
return buf0, primals_2
class LinRegModelNew(nn.Module):
def __init__(self):
super().__init__()
self.a = nn.Parameter(torch.randn(1))
self.b = nn.Parameter(torch.randn(1))
def forward(self, input_0):
primals_1 = self.a
primals_3 = self.b
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| muellerzr/walk-with-deep-learning | LinRegModel | false | 7,293 | [
"Apache-2.0"
] | 1 | 4adbf26da4885d122ed305eccef3efbb6fb10df5 | https://github.com/muellerzr/walk-with-deep-learning/tree/4adbf26da4885d122ed305eccef3efbb6fb10df5 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.a = nn.Parameter(torch.randn(1))
self.b = nn.Parameter(torch.randn(1))
def forward(self, x):
return self.a * x + self.b
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
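# Minimal training sketch (editor addition, not from the repo): recover
# a = 2, b = -1 from noiseless synthetic data with plain SGD.
if __name__ == '__main__':
    x = torch.linspace(-1.0, 1.0, 64)
    y = 2.0 * x - 1.0
    model = Model()
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    for _ in range(500):
        opt.zero_grad()
        ((model(x) - y) ** 2).mean().backward()
        opt.step()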
|
NormalLogProb | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/kb/ckbntelp7fgmj6oe672w24ehydu67fnblllvt2ppxjo4bax42m3l.py
# Topologically Sorted Source Nodes: [var, mul, log, mul_1, sub, pow_2, mul_2, truediv, sub_1], Original ATen: [aten.pow, aten.mul, aten.log, aten.sub, aten.div]
# Source node to ATen node mapping:
# log => log
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# pow_2 => pow_2
# sub => sub
# sub_1 => sub_1
# truediv => div
# var => pow_1
# Graph fragment:
# %pow_1 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 6.283185307179586), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log, -0.5), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg2_1), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 2), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_2, %mul_2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %div), kwargs = {})
triton_poi_fused_div_log_mul_pow_sub_0 = async_compile.triton('triton_poi_fused_div_log_mul_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_log_mul_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_log_mul_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp7 = tl.load(in_ptr1 + (x0), xmask)
tmp8 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = tmp0 * tmp0
tmp2 = 6.283185307179586
tmp3 = tmp1 * tmp2
tmp4 = tl_math.log(tmp3)
tmp5 = -0.5
tmp6 = tmp4 * tmp5
tmp9 = tmp7 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = 2.0
tmp12 = tmp1 * tmp11
tmp13 = tmp10 / tmp12
tmp14 = tmp6 - tmp13
tl.store(out_ptr0 + (x0), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [var, mul, log, mul_1, sub, pow_2, mul_2, truediv, sub_1], Original ATen: [aten.pow, aten.mul, aten.log, aten.sub, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_log_mul_pow_sub_0.run(arg0_1, arg1_1, arg2_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
import torch.utils
import torch.utils.data
class NormalLogProb(nn.Module):
def __init__(self):
super().__init__()
def forward(self, loc, scale, z):
var = torch.pow(scale, 2)
return -0.5 * torch.log(2 * np.pi * var) - torch.pow(z - loc, 2) / (
2 * var)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
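# Editor's sanity check (not part of the original repo): the closed form
# above should agree with torch.distributions.Normal, since
# log N(z; loc, var) = -0.5*log(2*pi*var) - (z - loc)**2 / (2*var).
if __name__ == '__main__':
    loc, scale, z = torch.randn(4, 4), torch.rand(4, 4) + 0.1, torch.randn(4, 4)
    ref = torch.distributions.Normal(loc, scale).log_prob(z)
    assert torch.allclose(NormalLogProb()(loc, scale, z), ref, atol=1e-5)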
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_log_mul_pow_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp7 = tl.load(in_ptr1 + x0, xmask)
tmp8 = tl.load(in_ptr2 + x0, xmask)
tmp1 = tmp0 * tmp0
tmp2 = 6.283185307179586
tmp3 = tmp1 * tmp2
tmp4 = tl_math.log(tmp3)
tmp5 = -0.5
tmp6 = tmp4 * tmp5
tmp9 = tmp7 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = 2.0
tmp12 = tmp1 * tmp11
tmp13 = tmp10 / tmp12
tmp14 = tmp6 - tmp13
tl.store(out_ptr0 + x0, tmp14, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_log_mul_pow_sub_0[grid(256)](arg0_1, arg1_1,
arg2_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf0,
class NormalLogProbNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| msunardi/vae_experiment | NormalLogProb | false | 7,294 | [
"MIT"
] | 1 | e3ce39e586f1189d157e753370a90c07713658b3 | https://github.com/msunardi/vae_experiment/tree/e3ce39e586f1189d157e753370a90c07713658b3 | import torch
import numpy as np
import torch.nn as nn
import torch.utils
import torch.utils.data
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, loc, scale, z):
var = torch.pow(scale, 2)
return -0.5 * torch.log(2 * np.pi * var) - torch.pow(z - loc, 2) / (
2 * var)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return []
|
VGGNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/sj/csj6uus7z5hpvi77pvgp63jx4bne5i65mpzpsuvveo3mzfov6ycm.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hh/chhx3le7itvqiqhgpyf6t5xbaz3qoieowrwxioelmfh6lal6ftfr.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => _low_memory_max_pool2d_with_offsets, getitem_1
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2), tmp15, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/p5/cp5hv3hjyne6yfdkxfpcoembj5kdzfqaljt3y6s6i2vddt5q7436.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_4 => relu_2
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (128, 131072), (131072, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (128, 128), (128, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (6, 128), (128, 1))
assert_size_stride(primals_11, (6, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 524288, grid=grid(524288), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 524288, grid=grid(524288), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.int8)
buf5 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf3, buf4, buf5, 131072, grid=grid(131072), stream=stream0)
buf6 = empty_strided_cuda((1, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf5, (1, 131072), (0, 1), 0), reinterpret_tensor(primals_6, (131072, 128), (1, 131072), 0), out=buf6)
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
triton_poi_fused_relu_2.run(buf7, primals_7, 128, grid=grid(128), stream=stream0)
del primals_7
buf8 = empty_strided_cuda((1, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf7, reinterpret_tensor(primals_8, (128, 128), (1, 128), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
triton_poi_fused_relu_2.run(buf9, primals_9, 128, grid=grid(128), stream=stream0)
del primals_9
buf10 = empty_strided_cuda((1, 6), (6, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf9, reinterpret_tensor(primals_10, (128, 6), (1, 128), 0), alpha=1, beta=1, out=buf10)
del primals_11
return (buf10, primals_1, primals_3, primals_4, buf1, buf3, buf4, reinterpret_tensor(buf5, (1, 131072), (131072, 1), 0), buf7, buf9, primals_10, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 131072), (131072, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((6, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class VGGNet(nn.Module):
def __init__(self):
super(VGGNet, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1))
self.conv2 = nn.Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1))
self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.fc1 = nn.Linear(32 * 64 * 64, 128)
self.fc2 = nn.Linear(128, 128)
self.fc3 = nn.Linear(128, 6)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = self.pool(x)
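        # Note (editor): after 2x2 pooling each sample holds 32*32*32 = 32768
        # values, so the view below folds the 4-sample batch into a single row
        # of 32*64*64 = 131072 -- matching the (1, 131072) mm in the compiled
        # wrapper above.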
x = x.view(-1, 32 * 64 * 64)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (128, 131072), (131072, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 128), (128, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (6, 128), (128, 1))
assert_size_stride(primals_11, (6,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(524288)](buf1, primals_2,
524288, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(524288)](buf3, primals_5,
524288, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.int8)
buf5 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_1[grid(131072)](buf3, buf4,
buf5, 131072, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = empty_strided_cuda((1, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (1, 131072), (0, 1), 0),
reinterpret_tensor(primals_6, (131072, 128), (1, 131072), 0),
out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_relu_2[grid(128)](buf7, primals_7, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((1, 128), (128, 1), torch.float32)
extern_kernels.mm(buf7, reinterpret_tensor(primals_8, (128, 128), (
1, 128), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_2[grid(128)](buf9, primals_9, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_9
buf10 = empty_strided_cuda((1, 6), (6, 1), torch.float32)
extern_kernels.addmm(primals_11, buf9, reinterpret_tensor(
primals_10, (128, 6), (1, 128), 0), alpha=1, beta=1, out=buf10)
del primals_11
return (buf10, primals_1, primals_3, primals_4, buf1, buf3, buf4,
reinterpret_tensor(buf5, (1, 131072), (131072, 1), 0), buf7, buf9,
primals_10, primals_8, primals_6)
class VGGNetNew(nn.Module):
def __init__(self):
super(VGGNetNew, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1))
self.conv2 = nn.Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1))
self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.fc1 = nn.Linear(32 * 64 * 64, 128)
self.fc2 = nn.Linear(128, 128)
self.fc3 = nn.Linear(128, 6)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_10 = self.fc3.weight
primals_11 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| miyosuda/oculomotor | VGGNet | false | 7,295 | [
"Apache-2.0"
] | 1 | 78e7ec61a808d058116c69bff1ea71ecf117c126 | https://github.com/miyosuda/oculomotor/tree/78e7ec61a808d058116c69bff1ea71ecf117c126 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1))
self.conv2 = nn.Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1))
self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.fc1 = nn.Linear(32 * 64 * 64, 128)
self.fc2 = nn.Linear(128, 128)
self.fc3 = nn.Linear(128, 6)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = self.pool(x)
x = x.view(-1, 32 * 64 * 64)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
CNN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/iu/ciuxern2omgit5ovksuiwlddxkww6e3pkid4q2h3sauzn5rbd35z.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute_1, %primals_4, %primals_5, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/r3/cr3jcopeflsiuvhfrdhwyz5ezfrszpfwmvahpxzfmeqiuakakg7z.py
# Topologically Sorted Source Nodes: [conv1d, x_2, adaptive_max_pool1d], Original ATen: [aten.convolution, aten.relu, aten.adaptive_max_pool2d, aten.threshold_backward]
# Source node to ATen node mapping:
# adaptive_max_pool1d => adaptive_max_pool2d, getitem_1
# conv1d => convolution
# x_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute_1, %primals_4, %primals_5, [1], [0], [1], False, [0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %adaptive_max_pool2d : [num_users=2] = call_function[target=torch.ops.aten.adaptive_max_pool2d.default](args = (%unsqueeze, [1, 1]), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%adaptive_max_pool2d, 1), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_adaptive_max_pool2d_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_adaptive_max_pool2d_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_adaptive_max_pool2d_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_adaptive_max_pool2d_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.full([1], 0, tl.int64)
tmp6 = 0.0
tmp7 = tmp4 <= tmp6
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp5, xmask)
tl.store(out_ptr1 + (x2), tmp4, xmask)
tl.store(out_ptr2 + (x2), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (7, 4), (4, 1))
assert_size_stride(primals_7, (7, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf0, buf1, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1), (4, 1, 1))
del buf1
buf3 = buf2; del buf2 # reuse
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.int64)
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv1d, x_2, adaptive_max_pool1d], Original ATen: [aten.convolution, aten.relu, aten.adaptive_max_pool2d, aten.threshold_backward]
triton_poi_fused_adaptive_max_pool2d_convolution_relu_threshold_backward_1.run(buf3, primals_5, buf4, buf5, buf7, 16, grid=grid(16), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 7), (7, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (4, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 7), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_7
return (buf6, primals_4, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0), buf4, reinterpret_tensor(buf5, (4, 4), (4, 1), 0), primals_6, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((7, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((7, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class CNN(nn.Module):
""" CNN for heat shock protein classification """
def __init__(self, model_cfg, in_channels, dropout_rate):
super(CNN, self).__init__()
self.embedder = model_cfg.embedder
if self.embedder != 'OneHot':
self.embed = nn.Linear(in_channels, model_cfg.embed_dim)
in_channels = model_cfg.embed_dim
self.conv = nn.Conv1d(in_channels, model_cfg.num_channels,
model_cfg.kernel_size, 1)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=dropout_rate if dropout_rate is not
None else 0)
self.max_pool = nn.AdaptiveMaxPool1d(1)
self.linear = nn.Linear(model_cfg.num_channels, 7)
def forward(self, x):
if self.embedder != 'OneHot':
x = self.embed(x)
x = x.permute(0, 2, 1)
x = self.relu(self.conv(x))
x = self.max_pool(x).reshape(len(x), -1)
x = self.dropout(x)
x = self.linear(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'model_cfg': _mock_config(embedder=4, embed_dim=4,
num_channels=4, kernel_size=4), 'in_channels': 4, 'dropout_rate': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_adaptive_max_pool2d_convolution_relu_threshold_backward_1(
in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.full([1], 0, tl.int64)
tmp6 = 0.0
tmp7 = tmp4 <= tmp6
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr1 + x2, tmp4, xmask)
tl.store(out_ptr2 + x2, tmp7, xmask)
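# Editor's note: the conv output length is 1 here, so the adaptive max pool
# reduces a single element; the kernel therefore stores index 0 (tmp5),
# forwards the ReLU value unchanged, and records the <= 0 mask (tmp7) for
# the threshold backward.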
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (7, 4), (4, 1))
assert_size_stride(primals_7, (7,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](buf0, buf1, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1), (4, 1, 1))
del buf1
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.int64)
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.bool)
triton_poi_fused_adaptive_max_pool2d_convolution_relu_threshold_backward_1[
grid(16)](buf3, primals_5, buf4, buf5, buf7, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 7), (7, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (4, 4), (4,
1), 0), reinterpret_tensor(primals_6, (4, 7), (1, 4), 0), alpha
=1, beta=1, out=buf6)
del primals_7
return buf6, primals_4, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0
), buf4, reinterpret_tensor(buf5, (4, 4), (4, 1), 0), primals_6, buf7
class CNNNew(nn.Module):
""" CNN for heat shock protein classification """
def __init__(self, model_cfg, in_channels, dropout_rate):
super(CNNNew, self).__init__()
self.embedder = model_cfg.embedder
if self.embedder != 'OneHot':
self.embed = nn.Linear(in_channels, model_cfg.embed_dim)
in_channels = model_cfg.embed_dim
self.conv = nn.Conv1d(in_channels, model_cfg.num_channels,
model_cfg.kernel_size, 1)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=dropout_rate if dropout_rate is not
None else 0)
self.max_pool = nn.AdaptiveMaxPool1d(1)
self.linear = nn.Linear(model_cfg.num_channels, 7)
def forward(self, input_0):
primals_1 = self.embed.weight
primals_2 = self.embed.bias
primals_3 = self.conv.weight
primals_5 = self.conv.bias
primals_6 = self.linear.weight
primals_7 = self.linear.bias
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| mswzeus/DeeperHSP | CNN | false | 7,296 | [
"MIT"
] | 1 | 571387f048d3c33fcd78730fdaef57b6c44a27a7 | https://github.com/mswzeus/DeeperHSP/tree/571387f048d3c33fcd78730fdaef57b6c44a27a7 | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class Model(nn.Module):
""" CNN for heat shock protein classification """
def __init__(self, model_cfg, in_channels, dropout_rate):
super().__init__()
self.embedder = model_cfg.embedder
if self.embedder != 'OneHot':
self.embed = nn.Linear(in_channels, model_cfg.embed_dim)
in_channels = model_cfg.embed_dim
self.conv = nn.Conv1d(in_channels, model_cfg.num_channels,
model_cfg.kernel_size, 1)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=dropout_rate if dropout_rate is not
None else 0)
self.max_pool = nn.AdaptiveMaxPool1d(1)
self.linear = nn.Linear(model_cfg.num_channels, 7)
def forward(self, x):
if self.embedder != 'OneHot':
x = self.embed(x)
x = x.permute(0, 2, 1)
x = self.relu(self.conv(x))
x = self.max_pool(x).reshape(len(x), -1)
x = self.dropout(x)
x = self.linear(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'model_cfg': _mock_config(embedder=4, embed_dim=4,
num_channels=4, kernel_size=4), 'in_channels': 4, 'dropout_rate': 0.5}]
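# Illustrative sketch (helper name `_example_forward` is hypothetical): runs
# Model with the same mock config as get_init_inputs(). With kernel_size ==
# seq_len == 4 the Conv1d output has length 1, so the pooled features feed
# the 7-way classifier directly.
def _example_forward():
    cfg = _mock_config(embedder=4, embed_dim=4, num_channels=4, kernel_size=4)
    model = Model(cfg, in_channels=4, dropout_rate=0.5)
    logits = model(torch.rand(4, 4, 4))
    assert logits.shape == (4, 7)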
|
BlendLinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/fk/cfki6qg2njj4g7taj7ghaesj5brkn444q4jnoahj4egxmycjafqi.py
# Topologically Sorted Source Nodes: [sub, mul, add], Original ATen: [aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# mul => mul
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %view_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_6), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %mul), kwargs = {})
triton_poi_fused_add_mul_sub_0 = async_compile.triton('triton_poi_fused_add_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp5 - tmp2
tmp8 = tmp6 * tmp7
tmp9 = tmp2 + tmp8
tl.store(in_out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, mul, add], Original ATen: [aten.sub, aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_sub_0.run(buf2, primals_2, buf1, primals_5, primals_6, 256, grid=grid(256), stream=stream0)
del buf1
del primals_2
del primals_5
return (buf2, primals_6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class BlendLinear(nn.Module):
def __init__(self, dim_in, dim_out, layer_type=nn.Linear, **unused_kwargs):
super(BlendLinear, self).__init__()
self._layer0 = layer_type(dim_in, dim_out)
self._layer1 = layer_type(dim_in, dim_out)
def forward(self, t, x):
y0 = self._layer0(x)
y1 = self._layer1(x)
return y0 + (y1 - y0) * t
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_out': 4}]
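# Illustrative sanity check (helper name is hypothetical): because forward
# computes y0 + (y1 - y0) * t, t == 0 recovers _layer0(x) and t == 1 recovers
# _layer1(x) up to floating-point rounding.
def _check_blend_endpoints():
    blend = BlendLinear(4, 4)
    x = torch.rand(4, 4, 4, 4)
    assert torch.allclose(blend(torch.zeros_like(x), x), blend._layer0(x))
    assert torch.allclose(blend(torch.ones_like(x), x), blend._layer1(x))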
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp5 - tmp2
tmp8 = tmp6 * tmp7
tmp9 = tmp2 + tmp8
tl.store(in_out_ptr0 + x2, tmp9, xmask)
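# Per element, the kernel above evaluates both bias adds plus the blend in a
# single pass over the 256-element output:
#   out = (x @ W0.T + b0) + (((x @ W1.T + b1) - (x @ W0.T + b0)) * t)
# while the two matmuls themselves stay in the extern mm calls inside call().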
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_mul_sub_0[grid(256)](buf2, primals_2, buf1,
primals_5, primals_6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del primals_2
del primals_5
return buf2, primals_6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class BlendLinearNew(nn.Module):
def __init__(self, dim_in, dim_out, layer_type=nn.Linear, **unused_kwargs):
super(BlendLinearNew, self).__init__()
self._layer0 = layer_type(dim_in, dim_out)
self._layer1 = layer_type(dim_in, dim_out)
def forward(self, input_0, input_1):
primals_1 = self._layer0.weight
primals_2 = self._layer0.bias
primals_4 = self._layer1.weight
primals_5 = self._layer1.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| musyoku/ffjord | BlendLinear | false | 7,297 | [
"MIT"
] | 1 | 9e431e122e59fa9a71f3f301dec8fdd3db51e0ce | https://github.com/musyoku/ffjord/tree/9e431e122e59fa9a71f3f301dec8fdd3db51e0ce | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def __init__(self, dim_in, dim_out, layer_type=nn.Linear, **unused_kwargs):
super().__init__()
self._layer0 = layer_type(dim_in, dim_out)
self._layer1 = layer_type(dim_in, dim_out)
def forward(self, t, x):
y0 = self._layer0(x)
y1 = self._layer1(x)
return y0 + (y1 - y0) * t
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
BlendConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ds/cdsny3rdh32tqyjkeifiz77mt6yoehbkwlrxx6goqlqvlxxbtmip.py
# Topologically Sorted Source Nodes: [y0, y1, sub, mul, add], Original ATen: [aten.convolution, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# mul => mul
# sub => sub
# y0 => convolution
# y1 => convolution_1
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_1, %convolution), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_6), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %mul), kwargs = {})
triton_poi_fused_add_convolution_mul_sub_0 = async_compile.triton('triton_poi_fused_add_convolution_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp5 - tmp2
tmp8 = tmp6 * tmp7
tmp9 = tmp2 + tmp8
tl.store(in_out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 2, 2), (16, 4, 2, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [y0], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
# Topologically Sorted Source Nodes: [y1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1))
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [y0, y1, sub, mul, add], Original ATen: [aten.convolution, aten.sub, aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_convolution_mul_sub_0.run(buf2, primals_2, buf1, primals_5, primals_6, 64, grid=grid(64), stream=stream0)
del buf1
del primals_2
del primals_5
return (buf2, primals_1, primals_3, primals_4, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 2, 2), (16, 4, 2, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class BlendConv2d(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0,
dilation=1, groups=1, bias=True, transpose=False, **unused_kwargs):
super(BlendConv2d, self).__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer0 = module(dim_in, dim_out, kernel_size=ksize, stride=
stride, padding=padding, dilation=dilation, groups=groups, bias
=bias)
self._layer1 = module(dim_in, dim_out, kernel_size=ksize, stride=
stride, padding=padding, dilation=dilation, groups=groups, bias
=bias)
def forward(self, t, x):
y0 = self._layer0(x)
y1 = self._layer1(x)
return y0 + (y1 - y0) * t
def get_inputs():
return [torch.rand([4, 4, 2, 2]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_out': 4}]
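# Illustrative sanity check (helper name is hypothetical): with 4x4 inputs
# and the default 3x3 kernel each conv yields 2x2 maps, so t must broadcast
# against (4, 4, 2, 2) exactly as get_inputs() provides; t == 0.5 averages
# the two convolutions.
def _check_blend_midpoint():
    conv = BlendConv2d(4, 4)
    t = torch.full((4, 4, 2, 2), 0.5)
    x = torch.rand(4, 4, 4, 4)
    expected = 0.5 * (conv._layer0(x) + conv._layer1(x))
    assert torch.allclose(conv(t, x), expected, atol=1e-06)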
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_add_convolution_mul_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp5 - tmp2
tmp8 = tmp6 * tmp7
tmp9 = tmp2 + tmp8
tl.store(in_out_ptr0 + x3, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 2, 2), (16, 4, 2, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1))
buf2 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_convolution_mul_sub_0[grid(64)](buf2,
primals_2, buf1, primals_5, primals_6, 64, XBLOCK=64, num_warps
=1, num_stages=1)
del buf1
del primals_2
del primals_5
return buf2, primals_1, primals_3, primals_4, primals_6
class BlendConv2dNew(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0,
dilation=1, groups=1, bias=True, transpose=False, **unused_kwargs):
super(BlendConv2dNew, self).__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer0 = module(dim_in, dim_out, kernel_size=ksize, stride=
stride, padding=padding, dilation=dilation, groups=groups, bias
=bias)
self._layer1 = module(dim_in, dim_out, kernel_size=ksize, stride=
stride, padding=padding, dilation=dilation, groups=groups, bias
=bias)
def forward(self, input_0, input_1):
primals_1 = self._layer0.weight
primals_2 = self._layer0.bias
primals_4 = self._layer1.weight
primals_5 = self._layer1.bias
primals_6 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| musyoku/ffjord | BlendConv2d | false | 7,298 | [
"MIT"
] | 1 | 9e431e122e59fa9a71f3f301dec8fdd3db51e0ce | https://github.com/musyoku/ffjord/tree/9e431e122e59fa9a71f3f301dec8fdd3db51e0ce | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0,
dilation=1, groups=1, bias=True, transpose=False, **unused_kwargs):
super().__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer0 = module(dim_in, dim_out, kernel_size=ksize, stride=
stride, padding=padding, dilation=dilation, groups=groups, bias
=bias)
self._layer1 = module(dim_in, dim_out, kernel_size=ksize, stride=
stride, padding=padding, dilation=dilation, groups=groups, bias
=bias)
def forward(self, t, x):
y0 = self._layer0(x)
y1 = self._layer1(x)
return y0 + (y1 - y0) * t
def get_inputs():
return [torch.rand([4, 4, 2, 2]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
GINPreTransition | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2k/c2kiox2wvshockbbzjlycxwhjeigavlrfwuvcpbcbxpipbm7d7k6.py
# Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# input_2 => tanh
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_4), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/4c/c4c6imuywsqbbstgigf3vhemjpzi4d2e6k2igmpufo34pyjzsogf.py
# Topologically Sorted Source Nodes: [input_5, getitem_1, getitem_3], Original ATen: [aten.tanh, aten.index]
# Source node to ATen node mapping:
# getitem_1 => index
# getitem_3 => index_1
# input_5 => tanh_1
# Graph fragment:
# %tanh_1 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%addmm_2,), kwargs = {})
# %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%tanh_1, [%select]), kwargs = {})
# %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%tanh_1, [%select_1]), kwargs = {})
triton_poi_fused_index_tanh_2 = async_compile.triton('triton_poi_fused_index_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_tanh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_tanh_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4")
tmp6 = tl.load(in_ptr1 + (x0 + (4*tmp4)), xmask)
tmp7 = libdevice.tanh(tmp6)
tmp9 = tmp8 + tmp1
tmp10 = tmp8 < 0
tmp11 = tl.where(tmp10, tmp9, tmp8)
tl.device_assert(((0 <= tmp11) & (tmp11 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp11 < 4")
tmp13 = tl.load(in_ptr1 + (x0 + (4*tmp11)), xmask)
tmp14 = libdevice.tanh(tmp13)
tl.store(out_ptr0 + (x2), tmp7, xmask)
tl.store(out_ptr1 + (x2), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.tanh]
triton_poi_fused_tanh_1.run(buf2, primals_4, 16, grid=grid(16), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_6
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_8
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_5, getitem_1, getitem_3], Original ATen: [aten.tanh, aten.index]
triton_poi_fused_index_tanh_2.run(primals_9, buf4, buf5, buf6, 16, grid=grid(16), stream=stream0)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_5, getitem_1, getitem_3, new_state], Original ATen: [aten.tanh, aten.index, aten.add]
extern_kernels._mm_plus_mm(primals_10, buf5, primals_10, buf6, out=buf7)
del buf5
del buf6
return (buf7, buf0, buf2, buf3, buf4, reinterpret_tensor(primals_9, (4, ), (4, ), 1), reinterpret_tensor(primals_9, (4, ), (4, ), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), primals_7, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import typing
import torch.nn as nn
class MLP(nn.Module):
def __init__(self, input_dim, hidden_sizes: 'typing.Iterable[int]',
out_dim, activation_function=nn.Sigmoid(), activation_out=None):
super(MLP, self).__init__()
i_h_sizes = [input_dim] + hidden_sizes
self.mlp = nn.Sequential()
for idx in range(len(i_h_sizes) - 1):
self.mlp.add_module('layer_{}'.format(idx), nn.Linear(
in_features=i_h_sizes[idx], out_features=i_h_sizes[idx + 1]))
self.mlp.add_module('act', activation_function)
self.mlp.add_module('out_layer', nn.Linear(i_h_sizes[-1], out_dim))
if activation_out is not None:
self.mlp.add_module('out_layer_activation', activation_out)
def init(self):
for i, l in enumerate(self.mlp):
if type(l) == nn.Linear:
nn.init.xavier_normal_(l.weight)
def forward(self, x):
return self.mlp(x)
class GINPreTransition(nn.Module):
def __init__(self, node_state_dim: 'int', node_label_dim: 'int',
mlp_hidden_dim: 'typing.Iterable[int]', activation_function=nn.Tanh()):
super(type(self), self).__init__()
d_i = node_state_dim + node_label_dim
d_o = node_state_dim
d_h = list(mlp_hidden_dim)
self.mlp = MLP(input_dim=d_i, hidden_sizes=d_h, out_dim=d_o,
activation_function=activation_function, activation_out=
activation_function)
def forward(self, node_states, node_labels, edges, agg_matrix):
intermediate_states = self.mlp(torch.cat([node_states, node_labels],
-1))
new_state = torch.matmul(agg_matrix, intermediate_states[edges[:, 1]]
) + torch.matmul(agg_matrix, intermediate_states[edges[:, 0]])
return new_state
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.ones([4, 4],
dtype=torch.int64), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'node_state_dim': 4, 'node_label_dim': 4, 'mlp_hidden_dim':
[4, 4]}]
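# Illustrative sketch (helper name is hypothetical): the transition embeds
# [state || label] with the MLP, gathers the embedding of each edge's two
# endpoints, and mixes both through agg_matrix:
#   new_state = A @ h[edges[:, 1]] + A @ h[edges[:, 0]].
def _example_transition():
    trans = GINPreTransition(4, 4, [4, 4])
    states, labels = torch.rand(4, 4), torch.rand(4, 4)
    edges = torch.ones(4, 4, dtype=torch.int64)
    agg_matrix = torch.rand(4, 4)
    assert trans(states, labels, edges, agg_matrix).shape == (4, 4)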
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import typing
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
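# The kernel above fuses the hidden layer's bias add with tanh: in_out_ptr0
# already holds x @ W.T from the preceding mm, and x0 = xindex % 4 selects
# the per-feature bias before tanh is applied in place.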
@triton.jit
def triton_poi_fused_index_tanh_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
    tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~xmask,
        'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (x0 + 4 * tmp4), xmask)
tmp7 = libdevice.tanh(tmp6)
tmp9 = tmp8 + tmp1
tmp10 = tmp8 < 0
tmp11 = tl.where(tmp10, tmp9, tmp8)
    tl.device_assert(((0 <= tmp11) & (tmp11 < 4)) | ~xmask,
        'index out of bounds: 0 <= tmp11 < 4')
tmp13 = tl.load(in_ptr1 + (x0 + 4 * tmp11), xmask)
tmp14 = libdevice.tanh(tmp13)
tl.store(out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr1 + x2, tmp14, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8
), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_tanh_1[grid(16)](buf2, primals_4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_6
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_8
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_index_tanh_2[grid(16)](primals_9, buf4, buf5, buf6,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
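        # _mm_plus_mm fuses the two aggregations into one kernel:
        # agg_matrix @ h[edges[:, 1]] + agg_matrix @ h[edges[:, 0]],
        # with buf5/buf6 holding the two gathered tanh outputs.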
extern_kernels._mm_plus_mm(primals_10, buf5, primals_10, buf6, out=buf7
)
del buf5
del buf6
return buf7, buf0, buf2, buf3, buf4, reinterpret_tensor(primals_9, (4,),
(4,), 1), reinterpret_tensor(primals_9, (4,), (4,), 0
), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0
), primals_7, primals_5
class MLP(nn.Module):
def __init__(self, input_dim, hidden_sizes: 'typing.Iterable[int]',
out_dim, activation_function=nn.Sigmoid(), activation_out=None):
super(MLP, self).__init__()
i_h_sizes = [input_dim] + hidden_sizes
self.mlp = nn.Sequential()
for idx in range(len(i_h_sizes) - 1):
self.mlp.add_module('layer_{}'.format(idx), nn.Linear(
in_features=i_h_sizes[idx], out_features=i_h_sizes[idx + 1]))
self.mlp.add_module('act', activation_function)
self.mlp.add_module('out_layer', nn.Linear(i_h_sizes[-1], out_dim))
if activation_out is not None:
self.mlp.add_module('out_layer_activation', activation_out)
def init(self):
for i, l in enumerate(self.mlp):
if type(l) == nn.Linear:
nn.init.xavier_normal_(l.weight)
def forward(self, x):
return self.mlp(x)
class GINPreTransitionNew(nn.Module):
def __init__(self, node_state_dim: 'int', node_label_dim: 'int',
mlp_hidden_dim: 'typing.Iterable[int]', activation_function=nn.Tanh()):
super(type(self), self).__init__()
d_i = node_state_dim + node_label_dim
d_o = node_state_dim
d_h = list(mlp_hidden_dim)
self.mlp = MLP(input_dim=d_i, hidden_sizes=d_h, out_dim=d_o,
activation_function=activation_function, activation_out=
activation_function)
def forward(self, input_0, input_1, input_2, input_3):
primals_3 = self.mlp.mlp.layer_0.weight
primals_4 = self.mlp.mlp.layer_0.bias
primals_1 = self.mlp.mlp.layer_1.weight
primals_6 = self.mlp.mlp.layer_1.bias
primals_2 = self.mlp.mlp.out_layer.weight
primals_8 = self.mlp.mlp.out_layer.bias
primals_5 = input_0
primals_7 = input_1
primals_9 = input_2
primals_10 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
| mtiezzi/gnn_site | GINPreTransition | false | 7,299 | [
"BSD-3-Clause"
] | 1 | 79a13603db876ac24e66a152104faa8b76e1d8e7 | https://github.com/mtiezzi/gnn_site/tree/79a13603db876ac24e66a152104faa8b76e1d8e7 | import torch
import typing
import torch.nn as nn
class MLP(nn.Module):
def __init__(self, input_dim, hidden_sizes: 'typing.Iterable[int]',
out_dim, activation_function=nn.Sigmoid(), activation_out=None):
super().__init__()
i_h_sizes = [input_dim] + hidden_sizes
self.mlp = nn.Sequential()
for idx in range(len(i_h_sizes) - 1):
self.mlp.add_module('layer_{}'.format(idx), nn.Linear(
in_features=i_h_sizes[idx], out_features=i_h_sizes[idx + 1]))
self.mlp.add_module('act', activation_function)
self.mlp.add_module('out_layer', nn.Linear(i_h_sizes[-1], out_dim))
if activation_out is not None:
self.mlp.add_module('out_layer_activation', activation_out)
def init(self):
for i, l in enumerate(self.mlp):
if type(l) == nn.Linear:
nn.init.xavier_normal_(l.weight)
def forward(self, x):
return self.mlp(x)
class Model(nn.Module):
def __init__(self, node_state_dim: 'int', node_label_dim: 'int',
mlp_hidden_dim: 'typing.Iterable[int]', activation_function=nn.Tanh()):
super(type(self), self).__init__()
d_i = node_state_dim + node_label_dim
d_o = node_state_dim
d_h = list(mlp_hidden_dim)
self.mlp = MLP(input_dim=d_i, hidden_sizes=d_h, out_dim=d_o,
activation_function=activation_function, activation_out=
activation_function)
def forward(self, node_states, node_labels, edges, agg_matrix):
intermediate_states = self.mlp(torch.cat([node_states, node_labels],
-1))
new_state = torch.matmul(agg_matrix, intermediate_states[edges[:, 1]]
) + torch.matmul(agg_matrix, intermediate_states[edges[:, 0]])
return new_state
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.ones([4, 4],
dtype=torch.int64), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'node_state_dim': 4, 'node_label_dim': 4, 'mlp_hidden_dim':
[4, 4]}]
|
ConcatSquashLinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/w2/cw2aqi7423xdfnyiz52ub43v4vckwb466eb4jrlbmqamapriavp7.py
# Topologically Sorted Source Nodes: [sigmoid, mul, add], Original ATen: [aten.sigmoid, aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# mul => mul
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %sigmoid), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mm), kwargs = {})
triton_poi_fused_add_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 1), (1, 1))
assert_size_stride(primals_5, (4, 1), (1, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, primals_4, reinterpret_tensor(primals_5, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf1)
del primals_5
del primals_6
buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(primals_4, reinterpret_tensor(primals_7, (1, 4), (1, 1), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, mul, add], Original ATen: [aten.sigmoid, aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_0.run(buf0, buf1, buf2, buf3, 256, grid=grid(256), stream=stream0)
del buf2
return (buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class ConcatSquashLinear(nn.Module):
def __init__(self, dim_in, dim_out):
super(ConcatSquashLinear, self).__init__()
self._layer = nn.Linear(dim_in, dim_out)
self._hyper_bias = nn.Linear(1, dim_out, bias=False)
self._hyper_gate = nn.Linear(1, dim_out)
def forward(self, t, x):
return self._layer(x) * torch.sigmoid(self._hyper_gate(t.view(1, 1))
) + self._hyper_bias(t.view(1, 1))
def get_inputs():
return [torch.rand([1, 1]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_out': 4}]
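# Illustrative sketch (helper name is hypothetical): the concat-squash layer
# conditions a linear map on scalar time t by gating and shifting its output,
#   out = Linear(x) * sigmoid(gate(t)) + bias(t),
# so the result keeps x's shape while varying smoothly with t.
def _example_concat_squash():
    csl = ConcatSquashLinear(4, 4)
    t, x = torch.rand(1, 1), torch.rand(4, 4, 4, 4)
    assert csl(t, x).shape == (4, 4, 4, 4)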
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x2, tmp5, xmask)
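# Per element the kernel above computes out = linear(x) * sigmoid(gate(t)) +
# hyper_bias(t); in_ptr1 and in_ptr2 hold the length-4 gate and bias rows,
# broadcast over the 256-element output via x0 = xindex % 4.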
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 1), (1, 1))
assert_size_stride(primals_5, (4, 1), (1, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, primals_4, reinterpret_tensor(
primals_5, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf1)
del primals_5
del primals_6
buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_4, reinterpret_tensor(primals_7, (1, 4),
(1, 1), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_0[grid(256)](buf0, buf1, buf2,
buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf2
return buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, buf1
class ConcatSquashLinearNew(nn.Module):
def __init__(self, dim_in, dim_out):
super(ConcatSquashLinearNew, self).__init__()
self._layer = nn.Linear(dim_in, dim_out)
self._hyper_bias = nn.Linear(1, dim_out, bias=False)
self._hyper_gate = nn.Linear(1, dim_out)
def forward(self, input_0, input_1):
primals_1 = self._layer.weight
primals_2 = self._layer.bias
primals_5 = self._hyper_bias.weight
primals_7 = self._hyper_gate.weight
primals_6 = self._hyper_gate.bias
primals_4 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
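# --- Hedged verification sketch (added; assumes a CUDA device and that the
# eager ConcatSquashLinear class is importable alongside this wrapper).
# `_check_equivalence` is a hypothetical helper, not part of the capture.
def _check_equivalence():
    torch.manual_seed(0)
    eager = ConcatSquashLinear(4, 4).cuda()
    fused = ConcatSquashLinearNew(4, 4).cuda()
    fused.load_state_dict(eager.state_dict())
    t = torch.rand(1, 1, device='cuda')
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(t, x), fused(t, x), atol=1e-5)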
| musyoku/ffjord | ConcatSquashLinear | false | 7,300 | [ "MIT" ] | 1 | 9e431e122e59fa9a71f3f301dec8fdd3db51e0ce | https://github.com/musyoku/ffjord/tree/9e431e122e59fa9a71f3f301dec8fdd3db51e0ce | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
self._layer = nn.Linear(dim_in, dim_out)
self._hyper_bias = nn.Linear(1, dim_out, bias=False)
self._hyper_gate = nn.Linear(1, dim_out)
def forward(self, t, x):
return self._layer(x) * torch.sigmoid(self._hyper_gate(t.view(1, 1))
) + self._hyper_bias(t.view(1, 1))
def get_inputs():
return [torch.rand([1, 1]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ConcatSquashConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ej/cejcyvz7tzjp6tek7y3hc7b5nw6pshpwovikzdozuaem5aw7jmsd.py
# Topologically Sorted Source Nodes: [conv2d, mul, add], Original ATen: [aten.convolution, aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# conv2d => convolution
# mul => mul
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %view_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %view_3), kwargs = {})
triton_poi_fused_add_convolution_mul_0 = async_compile.triton('triton_poi_fused_add_convolution_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp2 * tmp4
tmp7 = tmp5 + tmp6
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 1), (1, 1))
assert_size_stride(primals_5, (4, 1), (1, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, primals_4, reinterpret_tensor(primals_5, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf2)
del primals_5
del primals_6
buf3 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(primals_4, reinterpret_tensor(primals_7, (1, 4), (1, 1), 0), out=buf3)
del primals_7
buf1 = buf0; del buf0 # reuse
buf4 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, mul, add], Original ATen: [aten.convolution, aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_convolution_mul_0.run(buf1, primals_2, buf2, buf3, buf4, 64, grid=grid(64), stream=stream0)
del buf3
del primals_2
return (buf4, primals_1, primals_3, primals_4, buf1, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class ConcatSquashConv2d(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0,
dilation=1, groups=1, bias=True, transpose=False):
super(ConcatSquashConv2d, self).__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer = module(dim_in, dim_out, kernel_size=ksize, stride=
stride, padding=padding, dilation=dilation, groups=groups, bias
=bias)
self._hyper_gate = nn.Linear(1, dim_out)
self._hyper_bias = nn.Linear(1, dim_out, bias=False)
def forward(self, t, x):
return self._layer(x) * torch.sigmoid(self._hyper_gate(t.view(1, 1))
).view(1, -1, 1, 1) + self._hyper_bias(t.view(1, 1)).view(1, -1,
1, 1)
def get_inputs():
return [torch.rand([1, 1]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_out': 4}]
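# --- Illustrative sketch (added): .view(1, -1, 1, 1) reshapes the
# (1, dim_out) gate and hyper-bias into per-channel scalars, so every
# spatial position of a channel is scaled and shifted uniformly.
# Hypothetical names, CPU only:
def demo_channel_gate():
    m = ConcatSquashConv2d(4, 4)
    t, x = torch.rand(1, 1), torch.rand(4, 4, 4, 4)
    out = m(t, x)  # (4, 4, 2, 2): 3x3 kernel, stride 1, no padding
    g = torch.sigmoid(m._hyper_gate(t)).view(1, -1, 1, 1)
    b = m._hyper_bias(t).view(1, -1, 1, 1)
    assert torch.allclose(out, m._layer(x) * g + b)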
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_convolution_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp2 * tmp4
tmp7 = tmp5 + tmp6
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp7, xmask)
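# (Added descriptive comments; the kernel itself is unchanged.) Above,
# in_out_ptr0 holds the raw conv output and is updated in place with the
# conv bias (tmp2); in_ptr1/in_ptr2 hold the (1, 4) gate logits and
# hyper-bias, indexed per channel via x1 = xindex // 4 % 4 since each
# channel owns 2*2 = 4 contiguous elements; out_ptr0 receives
# (conv + bias) * sigmoid(gate) + hyper_bias in a single pass.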
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 1), (1, 1))
assert_size_stride(primals_5, (4, 1), (1, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, primals_4, reinterpret_tensor(
primals_5, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf2)
del primals_5
del primals_6
buf3 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_4, reinterpret_tensor(primals_7, (1, 4),
(1, 1), 0), out=buf3)
del primals_7
buf1 = buf0
del buf0
buf4 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_convolution_mul_0[grid(64)](buf1, primals_2,
buf2, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf3
del primals_2
return buf4, primals_1, primals_3, primals_4, buf1, buf2
class ConcatSquashConv2dNew(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0,
dilation=1, groups=1, bias=True, transpose=False):
super(ConcatSquashConv2dNew, self).__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer = module(dim_in, dim_out, kernel_size=ksize, stride=
stride, padding=padding, dilation=dilation, groups=groups, bias
=bias)
self._hyper_gate = nn.Linear(1, dim_out)
self._hyper_bias = nn.Linear(1, dim_out, bias=False)
def forward(self, input_0, input_1):
primals_1 = self._layer.weight
primals_2 = self._layer.bias
primals_5 = self._hyper_gate.weight
primals_6 = self._hyper_gate.bias
primals_7 = self._hyper_bias.weight
primals_4 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| musyoku/ffjord | ConcatSquashConv2d | false | 7,301 | [ "MIT" ] | 1 | 9e431e122e59fa9a71f3f301dec8fdd3db51e0ce | https://github.com/musyoku/ffjord/tree/9e431e122e59fa9a71f3f301dec8fdd3db51e0ce | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0,
dilation=1, groups=1, bias=True, transpose=False):
super().__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer = module(dim_in, dim_out, kernel_size=ksize, stride=
stride, padding=padding, dilation=dilation, groups=groups, bias
=bias)
self._hyper_gate = nn.Linear(1, dim_out)
self._hyper_bias = nn.Linear(1, dim_out, bias=False)
def forward(self, t, x):
return self._layer(x) * torch.sigmoid(self._hyper_gate(t.view(1, 1))
).view(1, -1, 1, 1) + self._hyper_bias(t.view(1, 1)).view(1, -1,
1, 1)
def get_inputs():
return [torch.rand([1, 1]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
GatedConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3e/c3eee7neqslxkoihqpsmtjcjpjfrwf663xmas4li4f3utsnbc6cs.py
# Topologically Sorted Source Nodes: [f, conv2d_1, g, mul], Original ATen: [aten.convolution, aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# f => convolution
# g => sigmoid
# mul => mul
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %sigmoid), kwargs = {})
triton_poi_fused_convolution_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_convolution_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tl.sigmoid(tmp5)
tmp7 = tmp2 * tmp6
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
tl.store(in_out_ptr1 + (x2), tmp5, xmask)
tl.store(out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [f], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0; del buf0 # reuse
buf3 = buf2; del buf2 # reuse
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [f, conv2d_1, g, mul], Original ATen: [aten.convolution, aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_mul_sigmoid_0.run(buf1, buf3, primals_2, primals_5, buf4, 16, grid=grid(16), stream=stream0)
del primals_2
del primals_5
return (buf4, primals_1, primals_3, primals_4, buf1, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class GatedConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, groups=1):
super(GatedConv, self).__init__()
self.layer_f = nn.Conv2d(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, dilation=1, groups=groups)
self.layer_g = nn.Conv2d(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, dilation=1, groups=groups)
def forward(self, x):
f = self.layer_f(x)
g = torch.sigmoid(self.layer_g(x))
return f * g
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
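# --- Hedged alternative formulation (added; illustrative, not from the
# repo). The two parallel convolutions can be fused into one with doubled
# output channels plus F.glu, which splits channels into (a, b) and returns
# a * sigmoid(b). `FusedGatedConv` is a hypothetical name; the torch/nn
# imports above are assumed.
import torch.nn.functional as F
class FusedGatedConv(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, **kw):
        super().__init__()
        # One filter bank stacking layer_f's weights over layer_g's
        self.conv = nn.Conv2d(in_channels, 2 * out_channels, kernel_size, **kw)
    def forward(self, x):
        return F.glu(self.conv(x), dim=1)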
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tl.sigmoid(tmp5)
tmp7 = tmp2 * tmp6
tl.store(in_out_ptr0 + x2, tmp2, xmask)
tl.store(in_out_ptr1 + x2, tmp5, xmask)
tl.store(out_ptr0 + x2, tmp7, xmask)
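# (Added descriptive comments; the kernel itself is unchanged.) The fused
# kernel above folds both conv-bias adds into one pass: in_out_ptr0 and
# in_out_ptr1 are updated in place with the f- and g-branch biases, and
# out_ptr0 gets f * sigmoid(g); x0 = xindex % 4 selects the channel because
# each 1x1 output map leaves 4 contiguous channels per sample.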
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0
del buf0
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_mul_sigmoid_0[grid(16)](buf1, buf3,
primals_2, primals_5, buf4, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_2
del primals_5
return buf4, primals_1, primals_3, primals_4, buf1, buf3
class GatedConvNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, groups=1):
super(GatedConvNew, self).__init__()
self.layer_f = nn.Conv2d(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, dilation=1, groups=groups)
self.layer_g = nn.Conv2d(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, dilation=1, groups=groups)
def forward(self, input_0):
primals_1 = self.layer_f.weight
primals_2 = self.layer_f.bias
primals_4 = self.layer_g.weight  # call() convolves primals_3 (the input) with primals_1/primals_4
primals_5 = self.layer_g.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
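# Hypothetical smoke test (added; assumes CUDA). With a 4x4 input, a 4x4
# kernel, stride 1 and no padding, each gated feature map collapses to 1x1:
def _smoke_test_gated_conv():
    if torch.cuda.is_available():
        m = GatedConvNew(4, 4, 4).cuda()
        y = m(torch.rand(4, 4, 4, 4, device='cuda'))
        assert y.shape == (4, 4, 1, 1)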
| musyoku/ffjord | GatedConv | false | 7,302 | [ "MIT" ] | 1 | 9e431e122e59fa9a71f3f301dec8fdd3db51e0ce | https://github.com/musyoku/ffjord/tree/9e431e122e59fa9a71f3f301dec8fdd3db51e0ce | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, groups=1):
super().__init__()
self.layer_f = nn.Conv2d(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, dilation=1, groups=groups)
self.layer_g = nn.Conv2d(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, dilation=1, groups=groups)
def forward(self, x):
f = self.layer_f(x)
g = torch.sigmoid(self.layer_g(x))
return f * g
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
GatedConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/n7/cn763ltmvwhyolo4ons5bcg43w7gpcruu5cxt6jv63ou2sn6r2wl.py
# Topologically Sorted Source Nodes: [h, conv2d_1, g, mul], Original ATen: [aten.convolution, aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# g => sigmoid
# h => convolution
# mul => mul
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [4, 4], [1, 1], False, [0, 0], 1), kwargs = {})
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [4, 4], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %sigmoid), kwargs = {})
triton_poi_fused_convolution_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_convolution_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 81) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + (x3), xmask)
tmp4 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tl.sigmoid(tmp5)
tmp7 = tmp2 * tmp6
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(in_out_ptr1 + (x3), tmp5, xmask)
tl.store(out_ptr0 + (x3), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1))
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 9, 9), (324, 81, 9, 1))
buf1 = buf0; del buf0 # reuse
buf3 = buf2; del buf2 # reuse
buf4 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32)
# Topologically Sorted Source Nodes: [h, conv2d_1, g, mul], Original ATen: [aten.convolution, aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_mul_sigmoid_0.run(buf1, buf3, primals_2, primals_5, buf4, 1296, grid=grid(1296), stream=stream0)
del primals_2
del primals_5
return (buf4, primals_1, primals_3, primals_4, buf1, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class GatedConv2d(nn.Module):
def __init__(self, input_channels, output_channels, kernel_size, stride,
padding, dilation=1, activation=None):
super(GatedConv2d, self).__init__()
self.activation = activation
self.sigmoid = nn.Sigmoid()
self.h = nn.Conv2d(input_channels, output_channels, kernel_size,
stride, padding, dilation)
self.g = nn.Conv2d(input_channels, output_channels, kernel_size,
stride, padding, dilation)
def forward(self, x):
if self.activation is None:
h = self.h(x)
else:
h = self.activation(self.h(x))
g = self.sigmoid(self.g(x))
return h * g
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4, 'output_channels': 4, 'kernel_size':
4, 'stride': 1, 'padding': 4}]
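# Sanity arithmetic (added, illustrative): with side 4, padding 4, kernel 4,
# stride 1 the standard conv formula gives (4 + 2*4 - (4-1) - 1)//1 + 1 = 9,
# so each gated map is (4, 4, 9, 9) and the fused kernel visits
# 4*4*9*9 = 1296 elements, matching xnumel above.
def conv2d_out_side(size, k, stride=1, padding=0, dilation=1):
    return (size + 2 * padding - dilation * (k - 1) - 1) // stride + 1
assert conv2d_out_side(4, 4, padding=4) == 9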
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 81 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tl.sigmoid(tmp5)
tmp7 = tmp2 * tmp6
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(in_out_ptr1 + x3, tmp5, xmask)
tl.store(out_ptr0 + x3, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1))
buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 9, 9), (324, 81, 9, 1))
buf1 = buf0
del buf0
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_mul_sigmoid_0[grid(1296)](buf1, buf3,
primals_2, primals_5, buf4, 1296, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
del primals_5
return buf4, primals_1, primals_3, primals_4, buf1, buf3
class GatedConv2dNew(nn.Module):
def __init__(self, input_channels, output_channels, kernel_size, stride,
padding, dilation=1, activation=None):
super(GatedConv2dNew, self).__init__()
self.activation = activation
self.sigmoid = nn.Sigmoid()
self.h = nn.Conv2d(input_channels, output_channels, kernel_size,
stride, padding, dilation)
self.g = nn.Conv2d(input_channels, output_channels, kernel_size,
stride, padding, dilation)
def forward(self, input_0):
primals_1 = self.h.weight
primals_2 = self.h.bias
primals_4 = self.g.weight
primals_5 = self.g.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| musyoku/ffjord | GatedConv2d | false | 7,303 | [ "MIT" ] | 1 | 9e431e122e59fa9a71f3f301dec8fdd3db51e0ce | https://github.com/musyoku/ffjord/tree/9e431e122e59fa9a71f3f301dec8fdd3db51e0ce | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def __init__(self, input_channels, output_channels, kernel_size, stride,
padding, dilation=1, activation=None):
super().__init__()
self.activation = activation
self.sigmoid = nn.Sigmoid()
self.h = nn.Conv2d(input_channels, output_channels, kernel_size,
stride, padding, dilation)
self.g = nn.Conv2d(input_channels, output_channels, kernel_size,
stride, padding, dilation)
def forward(self, x):
if self.activation is None:
h = self.h(x)
else:
h = self.activation(self.h(x))
g = self.sigmoid(self.g(x))
return h * g
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4, 'output_channels': 4, 'kernel_size':
4, 'stride': 1, 'padding': 4}]
|
GatedConvTranspose | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/6k/c6kazydtzigopxuzedfthhmhnydldamntm2carnmlp5uv53z3g7p.py
# Topologically Sorted Source Nodes: [f, conv_transpose2d_1, g, mul], Original ATen: [aten.convolution, aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# conv_transpose2d_1 => convolution_1
# f => convolution
# g => sigmoid
# mul => mul
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %sigmoid), kwargs = {})
triton_poi_fused_convolution_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_convolution_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 49) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + (x3), xmask)
tmp4 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tl.sigmoid(tmp5)
tmp7 = tmp2 * tmp6
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(in_out_ptr1 + (x3), tmp5, xmask)
tl.store(out_ptr0 + (x3), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [f], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 7, 7), (196, 49, 7, 1))
# Topologically Sorted Source Nodes: [conv_transpose2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 7, 7), (196, 49, 7, 1))
buf1 = buf0; del buf0 # reuse
buf3 = buf2; del buf2 # reuse
buf4 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
# Topologically Sorted Source Nodes: [f, conv_transpose2d_1, g, mul], Original ATen: [aten.convolution, aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_mul_sigmoid_0.run(buf1, buf3, primals_2, primals_5, buf4, 784, grid=grid(784), stream=stream0)
del primals_2
del primals_5
return (buf4, primals_1, primals_3, primals_4, buf1, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class GatedConvTranspose(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, output_padding=0, groups=1):
super(GatedConvTranspose, self).__init__()
self.layer_f = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size, stride=stride, padding=padding, output_padding=
output_padding, groups=groups)
self.layer_g = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size, stride=stride, padding=padding, output_padding=
output_padding, groups=groups)
def forward(self, x):
f = self.layer_f(x)
g = torch.sigmoid(self.layer_g(x))
return f * g
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
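# Sanity arithmetic (added, illustrative): transposed convolution grows the
# map instead, (4 - 1)*1 - 2*0 + (4 - 1) + 0 + 1 = 7, so the buffers are
# (4, 4, 7, 7) and the fused kernel visits 4*4*7*7 = 784 elements.
def conv_transpose2d_out_side(size, k, stride=1, padding=0, output_padding=0, dilation=1):
    return (size - 1) * stride - 2 * padding + dilation * (k - 1) + output_padding + 1
assert conv_transpose2d_out_side(4, 4) == 7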
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 49 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tl.sigmoid(tmp5)
tmp7 = tmp2 * tmp6
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(in_out_ptr1 + x3, tmp5, xmask)
tl.store(out_ptr0 + x3, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 7, 7), (196, 49, 7, 1))
buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 7, 7), (196, 49, 7, 1))
buf1 = buf0
del buf0
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_mul_sigmoid_0[grid(784)](buf1, buf3,
primals_2, primals_5, buf4, 784, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_2
del primals_5
return buf4, primals_1, primals_3, primals_4, buf1, buf3
class GatedConvTransposeNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, output_padding=0, groups=1):
super(GatedConvTransposeNew, self).__init__()
self.layer_f = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size, stride=stride, padding=padding, output_padding=
output_padding, groups=groups)
self.layer_g = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size, stride=stride, padding=padding, output_padding=
output_padding, groups=groups)
def forward(self, input_0):
primals_1 = self.layer_f.weight
primals_2 = self.layer_f.bias
primals_4 = self.layer_g.weight
primals_5 = self.layer_g.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| musyoku/ffjord | GatedConvTranspose | false | 7,304 | [ "MIT" ] | 1 | 9e431e122e59fa9a71f3f301dec8fdd3db51e0ce | https://github.com/musyoku/ffjord/tree/9e431e122e59fa9a71f3f301dec8fdd3db51e0ce | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, output_padding=0, groups=1):
super().__init__()
self.layer_f = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size, stride=stride, padding=padding, output_padding=
output_padding, groups=groups)
self.layer_g = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size, stride=stride, padding=padding, output_padding=
output_padding, groups=groups)
def forward(self, x):
f = self.layer_f(x)
g = torch.sigmoid(self.layer_g(x))
return f * g
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
HyperConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/cu/ccutvo2v4333pq6xhrg2zryqqwthm7dmmuqprvva2xdwiodpz5jn.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_4, %view_2, %slice_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 1), (1, 1))
assert_size_stride(primals_2, (148, 1), (1, 1))
assert_size_stride(primals_3, (148, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 148), (148, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(primals_2, (1, 148), (1, 1), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_4, reinterpret_tensor(buf0, (4, 4, 3, 3), (36, 9, 3, 1), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf2, buf0, 64, grid=grid(64), stream=stream0)
return (buf2, primals_1, primals_4, reinterpret_tensor(buf0, (4, 4, 3, 3), (36, 9, 3, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((148, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((148, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1 or classname.find('Conv') != -1:
nn.init.constant_(m.weight, 0)
nn.init.normal_(m.bias, 0, 0.01)
class HyperConv2d(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0,
dilation=1, groups=1, bias=True, transpose=False):
super(HyperConv2d, self).__init__()
assert dim_in % groups == 0 and dim_out % groups == 0, 'dim_in and dim_out must both be divisible by groups.'
self.dim_in = dim_in
self.dim_out = dim_out
self.ksize = ksize
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.bias = bias
self.transpose = transpose
self.params_dim = int(dim_in * dim_out * ksize * ksize / groups)
if self.bias:
self.params_dim += dim_out
self._hypernet = nn.Linear(1, self.params_dim)
self.conv_fn = F.conv_transpose2d if transpose else F.conv2d
self._hypernet.apply(weights_init)
def forward(self, t, x):
params = self._hypernet(t.view(1, 1)).view(-1)
weight_size = int(self.dim_in * self.dim_out * self.ksize * self.
ksize / self.groups)
if self.transpose:
weight = params[:weight_size].view(self.dim_in, self.dim_out //
self.groups, self.ksize, self.ksize)
else:
weight = params[:weight_size].view(self.dim_out, self.dim_in //
self.groups, self.ksize, self.ksize)
bias = params[:self.dim_out].view(self.dim_out) if self.bias else None
return self.conv_fn(x, weight=weight, bias=bias, stride=self.stride,
padding=self.padding, groups=self.groups, dilation=self.dilation)
def get_inputs():
return [torch.rand([1, 1]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_out': 4}]
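# Sanity arithmetic (added, illustrative): with dim_in = dim_out = 4,
# ksize = 3, groups = 1 and bias = True the hypernet emits
# 4*4*3*3 // 1 + 4 = 148 values, matching the nn.Linear(1, 148) weight of
# shape (148, 1) in the compiled call. Note the source reads the bias from
# params[:dim_out], i.e. the first entries of the same flat vector that also
# holds the weight.
def hypernet_params_dim(dim_in, dim_out, ksize, groups=1, bias=True):
    n = dim_in * dim_out * ksize * ksize // groups
    return n + dim_out if bias else n
assert hypernet_params_dim(4, 4, 3) == 148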
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
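# Elementwise epilogue for the hyper-generated convolution: adds the
# per-channel bias (the first `dim_out` entries of the hypernet output
# buffer) to the convolution result in place.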
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 1), (1, 1))
assert_size_stride(primals_2, (148, 1), (1, 1))
assert_size_stride(primals_3, (148,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 148), (148, 1), torch.float32)
extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(
primals_2, (1, 148), (1, 1), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = extern_kernels.convolution(primals_4, reinterpret_tensor(
buf0, (4, 4, 3, 3), (36, 9, 3, 1), 0), stride=(1, 1), padding=(
0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1))
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf2, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
return buf2, primals_1, primals_4, reinterpret_tensor(buf0, (4, 4, 3, 3
), (36, 9, 3, 1), 0)
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1 or classname.find('Conv') != -1:
nn.init.constant_(m.weight, 0)
nn.init.normal_(m.bias, 0, 0.01)
class HyperConv2dNew(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0,
dilation=1, groups=1, bias=True, transpose=False):
super(HyperConv2dNew, self).__init__()
assert dim_in % groups == 0 and dim_out % groups == 0, 'dim_in and dim_out must both be divisible by groups.'
self.dim_in = dim_in
self.dim_out = dim_out
self.ksize = ksize
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.bias = bias
self.transpose = transpose
self.params_dim = int(dim_in * dim_out * ksize * ksize / groups)
if self.bias:
self.params_dim += dim_out
self._hypernet = nn.Linear(1, self.params_dim)
self.conv_fn = F.conv_transpose2d if transpose else F.conv2d
self._hypernet.apply(weights_init)
def forward(self, input_0, input_1):
primals_2 = self._hypernet.weight
primals_3 = self._hypernet.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| musyoku/ffjord | HyperConv2d | false | 7,305 | ["MIT"] | 1 | 9e431e122e59fa9a71f3f301dec8fdd3db51e0ce | https://github.com/musyoku/ffjord/tree/9e431e122e59fa9a71f3f301dec8fdd3db51e0ce | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1 or classname.find('Conv') != -1:
nn.init.constant_(m.weight, 0)
nn.init.normal_(m.bias, 0, 0.01)
class Model(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0,
dilation=1, groups=1, bias=True, transpose=False):
super().__init__()
assert dim_in % groups == 0 and dim_out % groups == 0, 'dim_in and dim_out must both be divisible by groups.'
self.dim_in = dim_in
self.dim_out = dim_out
self.ksize = ksize
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.bias = bias
self.transpose = transpose
self.params_dim = int(dim_in * dim_out * ksize * ksize / groups)
if self.bias:
self.params_dim += dim_out
self._hypernet = nn.Linear(1, self.params_dim)
self.conv_fn = F.conv_transpose2d if transpose else F.conv2d
self._hypernet.apply(weights_init)
def forward(self, t, x):
params = self._hypernet(t.view(1, 1)).view(-1)
weight_size = int(self.dim_in * self.dim_out * self.ksize * self.
ksize / self.groups)
if self.transpose:
weight = params[:weight_size].view(self.dim_in, self.dim_out //
self.groups, self.ksize, self.ksize)
else:
weight = params[:weight_size].view(self.dim_out, self.dim_in //
self.groups, self.ksize, self.ksize)
bias = params[:self.dim_out].view(self.dim_out) if self.bias else None
return self.conv_fn(x, weight=weight, bias=bias, stride=self.stride,
padding=self.padding, groups=self.groups, dilation=self.dilation)
def get_inputs():
return [torch.rand([1, 1]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
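
# --- Added usage sketch (editor's illustration, not from the upstream repo) ---
# The hypernet maps a scalar t to every conv parameter at once:
# params_dim = dim_in*dim_out*ksize*ksize/groups (+ dim_out when bias=True),
# i.e. 4*4*3*3 + 4 = 148 here, matching the (148, 1) Linear asserted above.
if __name__ == '__main__':
    model = Model(4, 4)
    t, x = torch.rand(1, 1), torch.rand(4, 4, 4, 4)
    out = model(t, x)
    print(out.shape)  # torch.Size([4, 4, 2, 2]): 3x3 kernel, padding=0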
|
BasicBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/wd/cwdc75bzdixqjtzarkbcxze6jwzufryjpat4f3mbqxnzuklbuuxw.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# out_1 => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 0.0001), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_per_fused_native_group_norm_0 = async_compile.triton('triton_per_fused_native_group_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[8, 32],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 8
rnumel = 32
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (32*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 32, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 32.0
tmp18 = tmp16 / tmp17
tmp19 = 0.0001
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tl.store(out_ptr2 + (x0), tmp21, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
tl.store(out_ptr1 + (x0), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/fh/cfh6pud2lmejfzlfcsbg3xpj6hy3yssbhoyvgzu4xj3z7teollra.py
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# out_1 => add_1, mul_1
# out_2 => relu
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
triton_poi_fused_native_group_norm_relu_1 = async_compile.triton('triton_poi_fused_native_group_norm_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = (xindex // 16)
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + ((x4 // 2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((x4 // 2)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 32.0
tmp5 = tmp3 / tmp4
tmp6 = 0.0001
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/an/can2ugv2pect72d2sdxuzgit2owstg2msehowjuz235bvx4rqo2x.py
# Topologically Sorted Source Nodes: [out_4, out_5, out_6], Original ATen: [aten.native_group_norm, aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_4 => add_3, mul_3
# out_5 => add_4
# out_6 => relu_1
# Graph fragment:
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %unsqueeze_11), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %unsqueeze_8), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %primals_1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_4,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_add_native_group_norm_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_add_native_group_norm_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*i1', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_group_norm_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_threshold_backward_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = (xindex // 16)
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + ((x4 // 2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((x4 // 2)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr5 + (x3), xmask)
tmp2 = tmp0 - tmp1
tmp4 = 32.0
tmp5 = tmp3 / tmp4
tmp6 = 0.0001
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tl.full([1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tmp18 = 0.0
tmp19 = tmp17 <= tmp18
tl.store(out_ptr0 + (x3), tmp17, xmask)
tl.store(out_ptr1 + (x3), tmp19, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 8, 8), torch.float32)
buf2 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 8, 8), torch.float32)
buf4 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 8, 8), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.native_group_norm]
stream0 = get_raw_stream(0)
triton_per_fused_native_group_norm_0.run(buf0, buf1, buf2, buf4, 8, 32, grid=grid(8), stream=stream0)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_1.run(buf0, buf1, buf2, primals_3, primals_4, buf5, 256, grid=grid(256), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = buf2; del buf2 # reuse
buf8 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 8, 8), torch.float32)
buf10 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 8, 8), torch.float32)
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_0.run(buf6, buf7, buf8, buf10, 8, 32, grid=grid(8), stream=stream0)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_4, out_5, out_6], Original ATen: [aten.native_group_norm, aten.add, aten.relu, aten.threshold_backward]
triton_poi_fused_add_native_group_norm_relu_threshold_backward_2.run(buf6, buf7, buf8, primals_6, primals_7, primals_1, buf11, buf12, 256, grid=grid(256), stream=stream0)
del buf8
del primals_7
return (buf11, primals_1, primals_2, primals_3, primals_5, primals_6, buf0, reinterpret_tensor(buf1, (4, 2), (2, 1), 0), reinterpret_tensor(buf4, (4, 2), (2, 1), 0), buf5, buf6, reinterpret_tensor(buf7, (4, 2), (2, 1), 0), reinterpret_tensor(buf10, (4, 2), (2, 1), 0), buf12, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, dim):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=False)
self.bn1 = nn.GroupNorm(2, dim, eps=0.0001)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.GroupNorm(2, dim, eps=0.0001)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += residual
out = self.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_native_group_norm_0(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 8
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 32 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 32, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 32.0
tmp18 = tmp16 / tmp17
tmp19 = 0.0001
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tl.store(out_ptr2 + x0, tmp21, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
tl.store(out_ptr1 + x0, tmp16, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 16
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4 // 2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4 // 2, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 32.0
tmp5 = tmp3 / tmp4
tmp6 = 0.0001
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_threshold_backward_2(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 16
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4 // 2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4 // 2, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr5 + x3, xmask)
tmp2 = tmp0 - tmp1
tmp4 = 32.0
tmp5 = tmp3 / tmp4
tmp6 = 0.0001
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tl.full([1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tmp18 = 0.0
tmp19 = tmp17 <= tmp18
tl.store(out_ptr0 + x3, tmp17, xmask)
tl.store(out_ptr1 + x3, tmp19, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 8, 8), torch.float32)
buf2 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 8, 8), torch.float32)
buf4 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 8, 8), torch.float32)
get_raw_stream(0)
triton_per_fused_native_group_norm_0[grid(8)](buf0, buf1, buf2,
buf4, 8, 32, XBLOCK=1, num_warps=2, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_group_norm_relu_1[grid(256)](buf0, buf1,
buf2, primals_3, primals_4, buf5, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_4
buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = buf2
del buf2
buf8 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 8, 8), torch.float32)
buf10 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 8, 8), torch.float32)
triton_per_fused_native_group_norm_0[grid(8)](buf6, buf7, buf8,
buf10, 8, 32, XBLOCK=1, num_warps=2, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_native_group_norm_relu_threshold_backward_2[grid
(256)](buf6, buf7, buf8, primals_6, primals_7, primals_1, buf11,
buf12, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf8
del primals_7
return (buf11, primals_1, primals_2, primals_3, primals_5, primals_6,
buf0, reinterpret_tensor(buf1, (4, 2), (2, 1), 0),
reinterpret_tensor(buf4, (4, 2), (2, 1), 0), buf5, buf6,
reinterpret_tensor(buf7, (4, 2), (2, 1), 0), reinterpret_tensor(
buf10, (4, 2), (2, 1), 0), buf12)
class BasicBlockNew(nn.Module):
expansion = 1
def __init__(self, dim):
super(BasicBlockNew, self).__init__()
self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=False)
self.bn1 = nn.GroupNorm(2, dim, eps=0.0001)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.GroupNorm(2, dim, eps=0.0001)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.bn1.weight
primals_4 = self.bn1.bias
primals_5 = self.conv2.weight
primals_6 = self.bn2.weight
primals_7 = self.bn2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| musyoku/ffjord | BasicBlock | false | 7,306 | ["MIT"] | 1 | 9e431e122e59fa9a71f3f301dec8fdd3db51e0ce | https://github.com/musyoku/ffjord/tree/9e431e122e59fa9a71f3f301dec8fdd3db51e0ce | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
expansion = 1
def __init__(self, dim):
super().__init__()
self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=False)
self.bn1 = nn.GroupNorm(2, dim, eps=0.0001)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.GroupNorm(2, dim, eps=0.0001)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += residual
out = self.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
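
# --- Added sanity check (editor's sketch, not part of the upstream repo) ---
# The block is shape-preserving (3x3 convs, padding=1), so the residual add
# needs no projection; GroupNorm(2, dim) additionally requires an even dim.
if __name__ == '__main__':
    block = Model(4)
    x = torch.rand(4, 4, 4, 4)
    assert block(x).shape == x.shape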
|
PNTrainingSigmoid | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/yt/cyt2r7ev6xwoufkow4jasbaepeofdv4q2arrsxdjxlcc2iun7pwq.py
# Topologically Sorted Source Nodes: [neg, sigmoid, mean, cost, sub, sigmoid_1, mean_1, mul_1, cost_1], Original ATen: [aten.neg, aten.sigmoid, aten.mean, aten.mul, aten.rsub, aten.add]
# Source node to ATen node mapping:
# cost => mul
# cost_1 => add
# mean => mean
# mean_1 => mean_1
# mul_1 => mul_1
# neg => neg
# sigmoid => sigmoid
# sigmoid_1 => sigmoid_1
# sub => sub
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%neg,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sigmoid,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %mean), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg2_1,), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sigmoid_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %mean_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_per_fused_add_mean_mul_neg_rsub_sigmoid_0 = async_compile.triton('triton_per_fused_add_mean_mul_neg_rsub_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_neg_rsub_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_mul_neg_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr2, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp6 = tl.load(in_ptr1 + (r0), None)
tmp11 = tl.load(in_ptr2 + (r0), None)
tmp1 = -tmp0
tmp2 = tl.sigmoid(tmp1)
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp7 = tl.sigmoid(tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp12 = 256.0
tmp13 = tmp5 / tmp12
tmp14 = tmp11 * tmp13
tmp15 = 1.0
tmp16 = tmp15 - tmp11
tmp17 = tmp10 / tmp12
tmp18 = tmp16 * tmp17
tmp19 = tmp14 + tmp18
tl.store(out_ptr2 + (tl.broadcast_to(r0, [RBLOCK])), tmp19, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [neg, sigmoid, mean, cost, sub, sigmoid_1, mean_1, mul_1, cost_1], Original ATen: [aten.neg, aten.sigmoid, aten.mean, aten.mul, aten.rsub, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_mul_neg_rsub_sigmoid_0.run(arg0_1, arg2_1, arg1_1, buf2, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class PNTrainingSigmoid(nn.Module):
def __init__(self):
super(PNTrainingSigmoid, self).__init__()
return
def forward(self, output_p, output_n, prior):
cost = prior * torch.mean(torch.sigmoid(-output_p))
cost = cost + (1 - prior) * torch.mean(torch.sigmoid(output_n))
return cost
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mul_neg_rsub_sigmoid_0(in_ptr0, in_ptr1,
in_ptr2, out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp6 = tl.load(in_ptr1 + r0, None)
tmp11 = tl.load(in_ptr2 + r0, None)
tmp1 = -tmp0
tmp2 = tl.sigmoid(tmp1)
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp7 = tl.sigmoid(tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp12 = 256.0
tmp13 = tmp5 / tmp12
tmp14 = tmp11 * tmp13
tmp15 = 1.0
tmp16 = tmp15 - tmp11
tmp17 = tmp10 / tmp12
tmp18 = tmp16 * tmp17
tmp19 = tmp14 + tmp18
tl.store(out_ptr2 + tl.broadcast_to(r0, [RBLOCK]), tmp19, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_mean_mul_neg_rsub_sigmoid_0[grid(1)](arg0_1,
arg2_1, arg1_1, buf2, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class PNTrainingSigmoidNew(nn.Module):
def __init__(self):
super(PNTrainingSigmoidNew, self).__init__()
return
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| mxuq/Imbalance-PU | PNTrainingSigmoid | false | 7,307 | ["MIT"] | 1 | fd4403b05f98ca6bc8156783e8275888d63f6435 | https://github.com/mxuq/Imbalance-PU/tree/fd4403b05f98ca6bc8156783e8275888d63f6435 | import torch
from torch import nn
class Model(nn.Module):
def __init__(self):
super().__init__()
return
def forward(self, output_p, output_n, prior):
cost = prior * torch.mean(torch.sigmoid(-output_p))
cost = cost + (1 - prior) * torch.mean(torch.sigmoid(output_n))
return cost
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return []
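
# --- Added usage sketch (editor's illustration; `pi` is a made-up prior) ---
# This is the ordinary PN risk with the sigmoid surrogate l(z) = sigmoid(-z):
# R = pi * E_p[l(g(x))] + (1 - pi) * E_n[l(-g(x))].
if __name__ == '__main__':
    criterion = Model()
    out_p, out_n = torch.randn(8), torch.randn(8)
    pi = torch.tensor(0.3)  # class prior; a scalar broadcasts fine here
    print(criterion(out_p, out_n, pi))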
|
TwoWordPSDProbe | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/qf/cqfsbij5fyvvptvaij7fdtghegxwkvlg5v6gxc2c7qrwytx5sxqw.py
# Topologically Sorted Source Nodes: [diffs, squared_diffs, squared_distances], Original ATen: [aten.sub, aten.pow, aten.sum]
# Source node to ATen node mapping:
# diffs => sub
# squared_diffs => pow_1
# squared_distances => sum_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%expand, %permute), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1]), kwargs = {})
triton_red_fused_pow_sub_sum_0 = async_compile.triton('triton_red_fused_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[64, 1024],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_pow_sub_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 64
rnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x4 = (xindex // 4)
x0 = xindex % 4
x2 = (xindex // 16)
_tmp5 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x5 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp0 = tl.load(in_ptr0 + (r3 + (1024*x4)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr0 + (r3 + (1024*x0) + (4096*x2)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = _tmp5 + tmp4
_tmp5 = tl.where(rmask & xmask, tmp6, _tmp5)
tmp5 = tl.sum(_tmp5, 1)[:, None]
tl.store(out_ptr0 + (x5), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 1024), (1024, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 1024), (1024, 1), torch.float32)
# Topologically Sorted Source Nodes: [transformed], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [diffs, squared_diffs, squared_distances], Original ATen: [aten.sub, aten.pow, aten.sum]
stream0 = get_raw_stream(0)
triton_red_fused_pow_sub_sum_0.run(buf0, buf1, 64, 1024, grid=grid(64), stream=stream0)
return (buf1, buf0, reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Probe(nn.Module):
pass
class TwoWordPSDProbe(Probe):
""" Computes squared L2 distance after projection by a matrix.
For a batch of sentences, computes all n^2 pairs of distances
for each sentence in the batch.
"""
def __init__(self, model_dim, probe_rank=1024):
super(TwoWordPSDProbe, self).__init__()
self.probe_rank = probe_rank
self.model_dim = model_dim
self.proj = nn.Parameter(data=torch.zeros(self.model_dim, self.
probe_rank))
nn.init.uniform_(self.proj, -0.05, 0.05)
def forward(self, batch):
""" Computes all n^2 pairs of distances after projection
for each sentence in a batch.
Note that due to padding, some distances will be non-zero for pads.
Computes (B(h_i-h_j))^T(B(h_i-h_j)) for all i,j
Args:
batch: a batch of word representations of the shape
(batch_size, max_seq_len, representation_dim)
Returns:
A tensor of distances of shape (batch_size, max_seq_len, max_seq_len)
"""
transformed = torch.matmul(batch, self.proj)
_batchlen, seqlen, _rank = transformed.size()
transformed = transformed.unsqueeze(2)
transformed = transformed.expand(-1, -1, seqlen, -1)
transposed = transformed.transpose(1, 2)
diffs = transformed - transposed
squared_diffs = diffs.pow(2)
squared_distances = torch.sum(squared_diffs, -1)
return squared_distances
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'model_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_red_fused_pow_sub_sum_0(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 64
rnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x4 = xindex // 4
x0 = xindex % 4
x2 = xindex // 16
_tmp5 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x5 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp0 = tl.load(in_ptr0 + (r3 + 1024 * x4), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr0 + (r3 + 1024 * x0 + 4096 * x2), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = _tmp5 + tmp4
_tmp5 = tl.where(rmask & xmask, tmp6, _tmp5)
tmp5 = tl.sum(_tmp5, 1)[:, None]
tl.store(out_ptr0 + x5, tmp5, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 1024), (1024, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 1024), (1024, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_red_fused_pow_sub_sum_0[grid(64)](buf0, buf1, 64, 1024,
XBLOCK=1, RBLOCK=1024, num_warps=8, num_stages=1)
return buf1, buf0, reinterpret_tensor(primals_2, (4, 16), (1, 4), 0)
class Probe(nn.Module):
pass
class TwoWordPSDProbeNew(Probe):
""" Computes squared L2 distance after projection by a matrix.
For a batch of sentences, computes all n^2 pairs of distances
for each sentence in the batch.
"""
def __init__(self, model_dim, probe_rank=1024):
super(TwoWordPSDProbeNew, self).__init__()
self.probe_rank = probe_rank
self.model_dim = model_dim
self.proj = nn.Parameter(data=torch.zeros(self.model_dim, self.
probe_rank))
nn.init.uniform_(self.proj, -0.05, 0.05)
def forward(self, input_0):
primals_1 = self.proj
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| muziyongshixin/pytorch_SSRP | TwoWordPSDProbe | false | 7,308 | ["MIT"] | 1 | e54b3098927ba2ff16bdc8f64f3a2bf46d1f72c5 | https://github.com/muziyongshixin/pytorch_SSRP/tree/e54b3098927ba2ff16bdc8f64f3a2bf46d1f72c5 | import torch
import torch.nn as nn
class Probe(nn.Module):
pass
class Model(Probe):
""" Computes squared L2 distance after projection by a matrix.
For a batch of sentences, computes all n^2 pairs of distances
for each sentence in the batch.
"""
def __init__(self, model_dim, probe_rank=1024):
super().__init__()
self.probe_rank = probe_rank
self.model_dim = model_dim
self.proj = nn.Parameter(data=torch.zeros(self.model_dim, self.
probe_rank))
nn.init.uniform_(self.proj, -0.05, 0.05)
def forward(self, batch):
""" Computes all n^2 pairs of distances after projection
for each sentence in a batch.
Note that due to padding, some distances will be non-zero for pads.
Computes (B(h_i-h_j))^T(B(h_i-h_j)) for all i,j
Args:
batch: a batch of word representations of the shape
(batch_size, max_seq_len, representation_dim)
Returns:
A tensor of distances of shape (batch_size, max_seq_len, max_seq_len)
"""
transformed = torch.matmul(batch, self.proj)
_batchlen, seqlen, _rank = transformed.size()
transformed = transformed.unsqueeze(2)
transformed = transformed.expand(-1, -1, seqlen, -1)
transposed = transformed.transpose(1, 2)
diffs = transformed - transposed
squared_diffs = diffs.pow(2)
squared_distances = torch.sum(squared_diffs, -1)
return squared_distances
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
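
# --- Added equivalence check (editor's sketch; the torch.cdist comparison
# is an illustration, not a claim from the source repo) ---
if __name__ == '__main__':
    probe = Model(4)
    batch = torch.rand(4, 4, 4)
    projected = torch.matmul(batch, probe.proj)
    ref = torch.cdist(projected, projected) ** 2
    # Expected to agree up to float32 rounding.
    print(torch.allclose(probe(batch), ref, atol=1e-4))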
|
GroupPointWise | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3u/c3ub52l73zdv4klgqzgxmtzrzxvztuyczv2jksnvrjr7erq7guxd.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# out => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 1, 1), (64, 16, 4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((1, 64, 4), (256, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (1, 64, 4), (0, 4, 1), 0), reinterpret_tensor(primals_2, (1, 4, 4), (16, 4, 1), 0), out=buf1)
del primals_2
return (reinterpret_tensor(buf1, (4, 4, 4, 4, 1), (64, 1, 16, 4, 1), 0), reinterpret_tensor(buf0, (1, 4, 64), (256, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class GroupPointWise(nn.Module):
def __init__(self, in_dim, n_heads=4, proj_factor=1, target_dim=None):
super().__init__()
if target_dim is not None:
proj_ch = target_dim // proj_factor
else:
proj_ch = in_dim // proj_factor
self.w = nn.Parameter(torch.Tensor(in_dim, n_heads, proj_ch // n_heads)
)
nn.init.normal_(self.w, std=0.01)
def forward(self, x):
x = x.permute(0, 2, 3, 1)
out = torch.einsum('bhwc,cnp->bnhwp', x, self.w)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 1, 1), (64, 16, 4, 1, 1, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf0, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((1, 64, 4), (256, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (1, 64, 4), (0, 4, 1),
0), reinterpret_tensor(primals_2, (1, 4, 4), (16, 4, 1), 0),
out=buf1)
del primals_2
return reinterpret_tensor(buf1, (4, 4, 4, 4, 1), (64, 1, 16, 4, 1), 0
), reinterpret_tensor(buf0, (1, 4, 64), (256, 1, 4), 0)
class GroupPointWiseNew(nn.Module):
def __init__(self, in_dim, n_heads=4, proj_factor=1, target_dim=None):
super().__init__()
if target_dim is not None:
proj_ch = target_dim // proj_factor
else:
proj_ch = in_dim // proj_factor
self.w = nn.Parameter(torch.Tensor(in_dim, n_heads, proj_ch // n_heads)
)
nn.init.normal_(self.w, std=0.01)
def forward(self, input_0):
primals_2 = self.w
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| nachiket273/VisTrans | GroupPointWise | false | 7,309 | ["MIT"] | 1 | 99129b02f275424ebff900189ec2055f26bb9912 | https://github.com/nachiket273/VisTrans/tree/99129b02f275424ebff900189ec2055f26bb9912 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, in_dim, n_heads=4, proj_factor=1, target_dim=None):
super().__init__()
if target_dim is not None:
proj_ch = target_dim // proj_factor
else:
proj_ch = in_dim // proj_factor
self.w = nn.Parameter(torch.Tensor(in_dim, n_heads, proj_ch // n_heads)
)
nn.init.normal_(self.w, std=0.01)
def forward(self, x):
x = x.permute(0, 2, 3, 1)
out = torch.einsum('bhwc,cnp->bnhwp', x, self.w)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/fz/cfzmg4qtw6jgry4nhlwopodzjz62ll3n3ykfox77hwd2crdnlh2w.py
# Topologically Sorted Source Nodes: [score_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# score_1 => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm_2, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [score_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# score_1 => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
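# Reference sketch (not part of the generated module): the two kernels above
# split a numerically stable softmax of score / sqrt(hidden_dim) into an
# exp-shift pass and a normalization pass; with hidden_dim = 4 the 0.5 factor
# in the first kernel is 1 / sqrt(4). A minimal PyTorch equivalent:
def _two_pass_softmax_reference(score, hidden_dim=4):
    shifted = (score - score.amax(dim=-1, keepdim=True)) / hidden_dim ** 0.5
    e = shifted.exp()                       # pass 1: exp((s - max) / sqrt(d))
    return e / e.sum(dim=-1, keepdim=True)  # pass 2: normalize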
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 16, 4), (64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_2, (1, 16, 4), (64, 4, 1), 0), primals_3, out=buf0)
del primals_3
buf1 = empty_strided_cuda((1, 16, 4), (64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_1, (1, 16, 4), (64, 4, 1), 0), primals_4, out=buf1)
del primals_4
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [qkt], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [score_1], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf2, buf3, 64, grid=grid(64), stream=stream0)
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [score_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf3, buf4, 64, grid=grid(64), stream=stream0)
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(buf4, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(buf5, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_6
return (reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), buf4, reinterpret_tensor(buf5, (16, 4), (4, 1), 0), primals_5, reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_1, (1, 4, 16), (64, 1, 4), 0), reinterpret_tensor(primals_2, (1, 4, 16), (64, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
def __init__(self, embed_dim, hidden_dim=None, out_dim=None, n_head=1,
score_function='scaled_dot_product', dropout=0):
""" Attention Mechanism
        :param embed_dim: dimension of the input key/query embeddings
        :param hidden_dim: per-head projection dimension (defaults to embed_dim // n_head)
        :param out_dim: output dimension of the final projection (defaults to embed_dim)
        :param n_head: number of heads (Multi-Head Attention)
:param score_function: scaled_dot_product / mlp (concat) / bi_linear (general dot)
:return (?, q_len, out_dim,)
"""
super(Attention, self).__init__()
if hidden_dim is None:
hidden_dim = embed_dim // n_head
if out_dim is None:
out_dim = embed_dim
self.embed_dim = embed_dim
self.hidden_dim = hidden_dim
self.n_head = n_head
self.score_function = score_function
self.w_kx = nn.Parameter(torch.FloatTensor(n_head, embed_dim,
hidden_dim))
self.w_qx = nn.Parameter(torch.FloatTensor(n_head, embed_dim,
hidden_dim))
self.proj = nn.Linear(n_head * hidden_dim, out_dim)
self.dropout = nn.Dropout(dropout)
if score_function == 'mlp':
self.weight = nn.Parameter(torch.Tensor(hidden_dim * 2))
elif self.score_function == 'bi_linear':
self.weight = nn.Parameter(torch.Tensor(hidden_dim, hidden_dim))
else:
self.register_parameter('weight', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.hidden_dim)
self.w_kx.data.uniform_(-stdv, stdv)
self.w_qx.data.uniform_(-stdv, stdv)
if self.weight is not None:
self.weight.data.uniform_(-stdv, stdv)
def forward(self, k, q):
if len(q.shape) == 2:
q = torch.unsqueeze(q, dim=1)
if len(k.shape) == 2:
k = torch.unsqueeze(k, dim=1)
mb_size = k.shape[0]
k_len = k.shape[1]
q_len = q.shape[1]
kx = k.repeat(self.n_head, 1, 1).view(self.n_head, -1, self.embed_dim)
qx = q.repeat(self.n_head, 1, 1).view(self.n_head, -1, self.embed_dim)
kx = torch.bmm(kx, self.w_kx).view(-1, k_len, self.hidden_dim)
qx = torch.bmm(qx, self.w_qx).view(-1, q_len, self.hidden_dim)
if self.score_function == 'scaled_dot_product':
kt = kx.permute(0, 2, 1)
qkt = torch.bmm(qx, kt)
score = torch.div(qkt, math.sqrt(self.hidden_dim))
elif self.score_function == 'mlp':
kxx = torch.unsqueeze(kx, dim=1).expand(-1, q_len, -1, -1)
qxx = torch.unsqueeze(qx, dim=2).expand(-1, -1, k_len, -1)
kq = torch.cat((kxx, qxx), dim=-1)
score = F.tanh(torch.matmul(kq, self.weight))
elif self.score_function == 'bi_linear':
qw = torch.matmul(qx, self.weight)
kt = kx.permute(0, 2, 1)
score = torch.bmm(qw, kt)
else:
raise RuntimeError('invalid score_function')
score = F.softmax(score, dim=-1)
output = torch.bmm(score, kx)
output = torch.cat(torch.split(output, mb_size, dim=0), dim=-1)
output = self.proj(output)
output = self.dropout(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4}]
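# Minimal usage sketch (shapes taken from get_inputs(); the output follows the
# docstring's (?, q_len, out_dim)):
def _attention_demo():
    att = Attention(embed_dim=4)
    k, q = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
    out = att(k, q)
    assert out.shape == (4, 4, 4)
    return out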
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 16, 4), (64, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_2, (1, 16, 4), (64, 4,
1), 0), primals_3, out=buf0)
del primals_3
buf1 = empty_strided_cuda((1, 16, 4), (64, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (1, 16, 4), (64, 4,
1), 0), primals_4, out=buf1)
del primals_4
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = buf2
del buf2
triton_poi_fused__softmax_1[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = buf3
del buf3
extern_kernels.bmm(buf4, reinterpret_tensor(buf0, (4, 4, 4), (16, 4,
1), 0), out=buf5)
buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf5, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf6)
del primals_6
return reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0
), buf4, reinterpret_tensor(buf5, (16, 4), (4, 1), 0
), primals_5, reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(primals_1, (1, 4, 16), (64, 1, 4), 0
), reinterpret_tensor(primals_2, (1, 4, 16), (64, 1, 4), 0)
class AttentionNew(nn.Module):
def __init__(self, embed_dim, hidden_dim=None, out_dim=None, n_head=1,
score_function='scaled_dot_product', dropout=0):
""" Attention Mechanism
        :param embed_dim: dimension of the input key/query embeddings
        :param hidden_dim: per-head projection dimension (defaults to embed_dim // n_head)
        :param out_dim: output dimension of the final projection (defaults to embed_dim)
        :param n_head: number of heads (Multi-Head Attention)
:param score_function: scaled_dot_product / mlp (concat) / bi_linear (general dot)
:return (?, q_len, out_dim,)
"""
super(AttentionNew, self).__init__()
if hidden_dim is None:
hidden_dim = embed_dim // n_head
if out_dim is None:
out_dim = embed_dim
self.embed_dim = embed_dim
self.hidden_dim = hidden_dim
self.n_head = n_head
self.score_function = score_function
self.w_kx = nn.Parameter(torch.FloatTensor(n_head, embed_dim,
hidden_dim))
self.w_qx = nn.Parameter(torch.FloatTensor(n_head, embed_dim,
hidden_dim))
self.proj = nn.Linear(n_head * hidden_dim, out_dim)
self.dropout = nn.Dropout(dropout)
if score_function == 'mlp':
self.weight = nn.Parameter(torch.Tensor(hidden_dim * 2))
elif self.score_function == 'bi_linear':
self.weight = nn.Parameter(torch.Tensor(hidden_dim, hidden_dim))
else:
self.register_parameter('weight', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.hidden_dim)
self.w_kx.data.uniform_(-stdv, stdv)
self.w_qx.data.uniform_(-stdv, stdv)
if self.weight is not None:
self.weight.data.uniform_(-stdv, stdv)
def forward(self, input_0, input_1):
primals_3 = self.w_kx
primals_4 = self.w_qx
primals_5 = self.proj.weight
primals_6 = self.proj.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| n-log-n/ABSA-PyTorch | Attention | false | 7,310 | ["MIT"] | 1 | 27b37e05954940fe37369cc679c080d1d8717362 | https://github.com/n-log-n/ABSA-PyTorch/tree/27b37e05954940fe37369cc679c080d1d8717362 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, embed_dim, hidden_dim=None, out_dim=None, n_head=1,
score_function='scaled_dot_product', dropout=0):
""" Attention Mechanism
:param embed_dim:
:param hidden_dim:
:param out_dim:
:param n_head: num of head (Multi-Head Attention)
:param score_function: scaled_dot_product / mlp (concat) / bi_linear (general dot)
:return (?, q_len, out_dim,)
"""
super().__init__()
if hidden_dim is None:
hidden_dim = embed_dim // n_head
if out_dim is None:
out_dim = embed_dim
self.embed_dim = embed_dim
self.hidden_dim = hidden_dim
self.n_head = n_head
self.score_function = score_function
self.w_kx = nn.Parameter(torch.FloatTensor(n_head, embed_dim,
hidden_dim))
self.w_qx = nn.Parameter(torch.FloatTensor(n_head, embed_dim,
hidden_dim))
self.proj = nn.Linear(n_head * hidden_dim, out_dim)
self.dropout = nn.Dropout(dropout)
if score_function == 'mlp':
self.weight = nn.Parameter(torch.Tensor(hidden_dim * 2))
elif self.score_function == 'bi_linear':
self.weight = nn.Parameter(torch.Tensor(hidden_dim, hidden_dim))
else:
self.register_parameter('weight', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.hidden_dim)
self.w_kx.data.uniform_(-stdv, stdv)
self.w_qx.data.uniform_(-stdv, stdv)
if self.weight is not None:
self.weight.data.uniform_(-stdv, stdv)
def forward(self, k, q):
if len(q.shape) == 2:
q = torch.unsqueeze(q, dim=1)
if len(k.shape) == 2:
k = torch.unsqueeze(k, dim=1)
mb_size = k.shape[0]
k_len = k.shape[1]
q_len = q.shape[1]
kx = k.repeat(self.n_head, 1, 1).view(self.n_head, -1, self.embed_dim)
qx = q.repeat(self.n_head, 1, 1).view(self.n_head, -1, self.embed_dim)
kx = torch.bmm(kx, self.w_kx).view(-1, k_len, self.hidden_dim)
qx = torch.bmm(qx, self.w_qx).view(-1, q_len, self.hidden_dim)
if self.score_function == 'scaled_dot_product':
kt = kx.permute(0, 2, 1)
qkt = torch.bmm(qx, kt)
score = torch.div(qkt, math.sqrt(self.hidden_dim))
elif self.score_function == 'mlp':
kxx = torch.unsqueeze(kx, dim=1).expand(-1, q_len, -1, -1)
qxx = torch.unsqueeze(qx, dim=2).expand(-1, -1, k_len, -1)
kq = torch.cat((kxx, qxx), dim=-1)
score = F.tanh(torch.matmul(kq, self.weight))
elif self.score_function == 'bi_linear':
qw = torch.matmul(qx, self.weight)
kt = kx.permute(0, 2, 1)
score = torch.bmm(qw, kt)
else:
raise RuntimeError('invalid score_function')
score = F.softmax(score, dim=-1)
output = torch.bmm(score, kx)
output = torch.cat(torch.split(output, mb_size, dim=0), dim=-1)
output = self.proj(output)
output = self.dropout(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
FitnetRegressor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3v/c3v7n6hzyrv5pn6uojl3hf6tko347a672spakigdzmqm7ebd4zwl.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
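# Reference sketch (not part of the generated module): the kernel above fuses
# the forward ReLU with the mask that threshold_backward needs, overwriting
# the conv output with relu(x) and saving (relu(x) <= 0) for the backward
# pass. A minimal PyTorch equivalent:
def _relu_with_backward_mask(x):
    y = x.clamp_min(0)  # forward ReLU
    mask = y <= 0       # positions whose incoming gradient is zeroed
    return y, mask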
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
return (buf1, primals_1, primals_2, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
class FitnetRegressor(torch.nn.Module):
def __init__(self, in_feature, out_feature):
super(FitnetRegressor, self).__init__()
self.in_feature = in_feature
self.out_feature = out_feature
self.regressor = torch.nn.Conv2d(in_feature, out_feature, 1, bias=False
)
torch.nn.init.kaiming_normal_(self.regressor.weight, mode='fan_out',
nonlinearity='relu')
self.regressor.weight.data.uniform_(-0.005, 0.005)
def forward(self, feature):
if feature.dim() == 2:
feature = feature.unsqueeze(2).unsqueeze(3)
return F.relu(self.regressor(feature))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_feature': 4, 'out_feature': 4}]
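# Usage sketch: 2-D features are expanded to (N, C, 1, 1) before the 1x1
# convolution, so the regressor also acts as a channel-wise linear map on
# pooled features.
def _fitnet_demo():
    reg = FitnetRegressor(4, 4)
    out4d = reg(torch.rand(2, 4, 8, 8))  # -> (2, 4, 8, 8)
    out2d = reg(torch.rand(2, 4))        # unsqueezed to (2, 4, 1, 1)
    return out4d.shape, out2d.shape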
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, buf2,
256, XBLOCK=128, num_warps=4, num_stages=1)
return buf1, primals_1, primals_2, buf2
class FitnetRegressorNew(torch.nn.Module):
def __init__(self, in_feature, out_feature):
super(FitnetRegressorNew, self).__init__()
self.in_feature = in_feature
self.out_feature = out_feature
self.regressor = torch.nn.Conv2d(in_feature, out_feature, 1, bias=False
)
torch.nn.init.kaiming_normal_(self.regressor.weight, mode='fan_out',
nonlinearity='relu')
self.regressor.weight.data.uniform_(-0.005, 0.005)
def forward(self, input_0):
primals_2 = self.regressor.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| naver-ai/cgl_fairness | FitnetRegressor | false | 7,311 | ["MIT"] | 1 | 00d3bec233c9b3e0f88496118abaed8321ca3159 | https://github.com/naver-ai/cgl_fairness/tree/00d3bec233c9b3e0f88496118abaed8321ca3159 | import torch
import torch.nn.functional as F
class Model(torch.nn.Module):
def __init__(self, in_feature, out_feature):
super().__init__()
self.in_feature = in_feature
self.out_feature = out_feature
self.regressor = torch.nn.Conv2d(in_feature, out_feature, 1, bias=False
)
torch.nn.init.kaiming_normal_(self.regressor.weight, mode='fan_out',
nonlinearity='relu')
self.regressor.weight.data.uniform_(-0.005, 0.005)
def forward(self, feature):
if feature.dim() == 2:
feature = feature.unsqueeze(2).unsqueeze(3)
return F.relu(self.regressor(feature))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ZeroOneTest | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/vx/cvxreiwqu3j36srlhcbhferosfm7fpntwgkaer7vdvdtvuem3j3r.py
# Topologically Sorted Source Nodes: [sign, sub, truediv, mean, cost, sub_1, sign_1, add, truediv_1, mean_1, mul_1, cost_1], Original ATen: [aten.sign, aten.rsub, aten.div, aten.mean, aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# cost => mul
# cost_1 => add_1
# mean => mean
# mean_1 => mean_1
# mul_1 => mul_1
# sign => sign
# sign_1 => sign_1
# sub => sub
# sub_1 => sub_1
# truediv => div
# truediv_1 => div_1
# Graph fragment:
# %sign : [num_users=1] = call_function[target=torch.ops.aten.sign.default](args = (%arg0_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sign), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %mean), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %sign_1 : [num_users=1] = call_function[target=torch.ops.aten.sign.default](args = (%arg2_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sign_1, 1), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %mean_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_per_fused_add_div_mean_mul_rsub_sign_0 = async_compile.triton('triton_per_fused_add_div_mean_mul_rsub_sign_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_rsub_sign_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sign_0(in_ptr0, in_ptr1, in_ptr2, out_ptr2, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp15 = tl.load(in_ptr1 + (r0), None)
tmp27 = tl.load(in_ptr2 + (r0), None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp1 < tmp0
tmp3 = tmp2.to(tl.int8)
tmp4 = tmp0 < tmp1
tmp5 = tmp4.to(tl.int8)
tmp6 = tmp3 - tmp5
tmp7 = tmp6.to(tmp0.dtype)
tmp8 = 1.0
tmp9 = tmp8 - tmp7
tmp10 = 0.5
tmp11 = tmp9 * tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp16 = tmp1 < tmp15
tmp17 = tmp16.to(tl.int8)
tmp18 = tmp15 < tmp1
tmp19 = tmp18.to(tl.int8)
tmp20 = tmp17 - tmp19
tmp21 = tmp20.to(tmp15.dtype)
tmp22 = tmp21 + tmp8
tmp23 = tmp22 * tmp10
tmp24 = tl.broadcast_to(tmp23, [RBLOCK])
tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0))
tmp28 = 256.0
tmp29 = tmp14 / tmp28
tmp30 = tmp27 * tmp29
tmp31 = tmp8 - tmp27
tmp32 = tmp26 / tmp28
tmp33 = tmp31 * tmp32
tmp34 = tmp30 + tmp33
tl.store(out_ptr2 + (tl.broadcast_to(r0, [RBLOCK])), tmp34, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sign, sub, truediv, mean, cost, sub_1, sign_1, add, truediv_1, mean_1, mul_1, cost_1], Original ATen: [aten.sign, aten.rsub, aten.div, aten.mean, aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mean_mul_rsub_sign_0.run(arg0_1, arg2_1, arg1_1, buf2, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class ZeroOneTest(nn.Module):
def __init__(self):
super(ZeroOneTest, self).__init__()
return
def forward(self, output_p, output_n, prior):
cost = prior * torch.mean((1 - torch.sign(output_p)) / 2)
cost = cost + (1 - prior) * torch.mean((1 + torch.sign(output_n)) / 2)
return cost
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
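# Worked check: with perfectly separated scores the 0-1 risk is zero, since
# sign(output_p) = 1 kills the first term and sign(output_n) = -1 kills the
# second, for any prior in [0, 1].
def _zero_one_sanity_check():
    test = ZeroOneTest()
    cost = test(torch.ones(8), -torch.ones(8), torch.tensor(0.3))
    assert torch.allclose(cost, torch.tensor(0.0))
    return cost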
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sign_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp15 = tl.load(in_ptr1 + r0, None)
tmp27 = tl.load(in_ptr2 + r0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp1 < tmp0
tmp3 = tmp2.to(tl.int8)
tmp4 = tmp0 < tmp1
tmp5 = tmp4.to(tl.int8)
tmp6 = tmp3 - tmp5
tmp7 = tmp6.to(tmp0.dtype)
tmp8 = 1.0
tmp9 = tmp8 - tmp7
tmp10 = 0.5
tmp11 = tmp9 * tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp16 = tmp1 < tmp15
tmp17 = tmp16.to(tl.int8)
tmp18 = tmp15 < tmp1
tmp19 = tmp18.to(tl.int8)
tmp20 = tmp17 - tmp19
tmp21 = tmp20.to(tmp15.dtype)
tmp22 = tmp21 + tmp8
tmp23 = tmp22 * tmp10
tmp24 = tl.broadcast_to(tmp23, [RBLOCK])
tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0))
tmp28 = 256.0
tmp29 = tmp14 / tmp28
tmp30 = tmp27 * tmp29
tmp31 = tmp8 - tmp27
tmp32 = tmp26 / tmp28
tmp33 = tmp31 * tmp32
tmp34 = tmp30 + tmp33
tl.store(out_ptr2 + tl.broadcast_to(r0, [RBLOCK]), tmp34, None)
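# Reference sketch (not part of the generated module): the kernel expresses
# torch.sign as a pair of comparisons, sign(x) = (0 < x) - (x < 0), before
# averaging the two risk terms. Equivalent for finite inputs:
def _sign_reference(x):
    return (x > 0).to(x.dtype) - (x < 0).to(x.dtype)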
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_rsub_sign_0[grid(1)](arg0_1,
arg2_1, arg1_1, buf2, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class ZeroOneTestNew(nn.Module):
def __init__(self):
super(ZeroOneTestNew, self).__init__()
return
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| mxuq/Imbalance-PU | ZeroOneTest | false | 7,312 | ["MIT"] | 1 | fd4403b05f98ca6bc8156783e8275888d63f6435 | https://github.com/mxuq/Imbalance-PU/tree/fd4403b05f98ca6bc8156783e8275888d63f6435 | import torch
from torch import nn
class Model(nn.Module):
def __init__(self):
super().__init__()
return
def forward(self, output_p, output_n, prior):
cost = prior * torch.mean((1 - torch.sign(output_p)) / 2)
cost = cost + (1 - prior) * torch.mean((1 + torch.sign(output_n)) / 2)
return cost
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return []
|
Landsat2ViirsNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/nr/cnr5lgijm7k6doqguie5wuabxlbddrcif6m52y5ztaiztmm5lcyy.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [4, 4], [1, 1], [2, 2], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.01), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 127008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3969) % 8
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')
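# Reference sketch (not part of the generated module): this kernel and the
# similar ones below fuse the convolution bias add with
# LeakyReLU(negative_slope=0.01), storing both the activation and the
# (pre-activation > 0) mask used by the backward pass. A minimal PyTorch
# equivalent, assuming y is the bias-free conv output:
def _bias_leaky_relu(y, bias):
    z = y + bias.view(1, -1, 1, 1)
    return z > 0, torch.where(z > 0, z, 0.01 * z)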
# kernel path: runs/run_shard_4/inductor_cache/4b/c4bz2cpervebzg4ppazym7mfhsu66vmmvrwxuls7xspzyojrcear.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [3, 3], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.01), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
triton_poi_fused_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 28224
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 441) % 16
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7f/c7fy3q7oposmsrqhqhqkigih2vt5bwmjndsvncunqh3au6dpojja.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_2 => gt_2, mul_2, where_2
# Graph fragment:
# %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_6, %primals_7, [3, 3], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.01), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {})
triton_poi_fused_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 6272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 49) % 32
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/q5/cq52xqbok3l6ca7mgpgiietkweau2kddeft43cwd4iryhre3znj2.py
# Topologically Sorted Source Nodes: [conv2d_3, x_3, adaptive_avg_pool2d], Original ATen: [aten.convolution, aten.leaky_relu, aten.mean]
# Source node to ATen node mapping:
# adaptive_avg_pool2d => mean
# conv2d_3 => convolution_3
# x_3 => gt_3, mul_3, where_3
# Graph fragment:
# %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_2, %primals_8, %primals_9, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_3 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.01), kwargs = {})
# %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%where_3, [-1, -2], True), kwargs = {})
triton_poi_fused_convolution_leaky_relu_mean_3 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_mean_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_mean_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_mean_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
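    # Fused bias-add + LeakyReLU + adaptive average pool. The pooled window is
    # 1x1 here (the conv output is already (4, 64, 1, 1)), so the "mean" is
    # just a division by 1.0 (tmp8/tmp9).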
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = 1.0
tmp9 = tmp7 / tmp8
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vx/cvxgikvf4odxfy443anpyiuj4co7fllc5yvys3hays4zemvnnsn2.py
# Topologically Sorted Source Nodes: [mul, std, mul_1, sample], Original ATen: [aten.mul, aten.exp, aten.add]
# Source node to ATen node mapping:
# mul => mul_4
# mul_1 => mul_5
# sample => add
# std => exp
# Graph fragment:
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_2, 0.5), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_4,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%randn, %exp), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%addmm_1, %mul_5), kwargs = {})
triton_poi_fused_add_exp_mul_4 = async_compile.triton('triton_poi_fused_add_exp_mul_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_mul_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
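    # VAE reparameterization: sample = mu + eps * exp(0.5 * log_var), with
    # in_ptr0 = mu, in_ptr1 = eps ~ N(0, I), in_ptr2 = log_var.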
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask)
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 * tmp5
tmp7 = tmp0 + tmp6
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/cq/ccq2e5g5kvi7tfbcukwmpfajz5mdminbpvfxlic7hpri5wujde3c.py
# Topologically Sorted Source Nodes: [conv_transpose2d, x_5], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv_transpose2d => convolution_4
# x_5 => gt_4, mul_6, where_4
# Graph fragment:
# %convolution_4 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%view_1, %primals_18, %primals_19, [1, 1], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %gt_4 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_4, 0), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, 0.01), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_4, %mul_6), kwargs = {})
triton_poi_fused_convolution_leaky_relu_5 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
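    # Same fused bias-add + LeakyReLU pattern; xnumel is exactly 2048 (a power
    # of two), so the mask below is constant-true and every access is in range.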
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ki/ckihgwoh3acikuliqzor6rewmvcf7cerswmb6huqltlyvmna3vw7.py
# Topologically Sorted Source Nodes: [conv_transpose2d_1, x_6], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv_transpose2d_1 => convolution_5
# x_6 => gt_5, mul_7, where_5
# Graph fragment:
# %convolution_5 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_4, %primals_20, %primals_21, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %gt_5 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_5, 0), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_5, 0.01), kwargs = {})
# %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %convolution_5, %mul_7), kwargs = {})
triton_poi_fused_convolution_leaky_relu_6 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
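    # Fused bias-add + LeakyReLU for the 16-channel 10x10 decoder stage;
    # x1 = (xindex // 100) % 16 selects the per-channel bias.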
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 100) % 16
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/3q/c3qz2h27fy6qsibvqlmapk7ijyivrhipmhrk4oddspoivflvrfmz.py
# Topologically Sorted Source Nodes: [conv_transpose2d_2, reconstruction], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# conv_transpose2d_2 => convolution_6
# reconstruction => sigmoid
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_5, %primals_22, %primals_23, [2, 2], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_6,), kwargs = {})
triton_poi_fused_convolution_sigmoid_7 = async_compile.triton('triton_poi_fused_convolution_sigmoid_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
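    # In-place epilogue for the final transposed conv: broadcast the single
    # output-channel bias, add it, and apply sigmoid.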
xnumel = 1764
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23 = args
args.clear()
assert_size_stride(primals_1, (8, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 3, 256, 256), (196608, 65536, 256, 1))
assert_size_stride(primals_4, (16, 8, 4, 4), (128, 16, 4, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (32, 16, 4, 4), (256, 16, 4, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (64, 32, 7, 7), (1568, 49, 7, 1))
assert_size_stride(primals_9, (64, ), (1, ))
assert_size_stride(primals_10, (128, 64), (64, 1))
assert_size_stride(primals_11, (128, ), (1, ))
assert_size_stride(primals_12, (64, 128), (128, 1))
assert_size_stride(primals_13, (64, ), (1, ))
assert_size_stride(primals_14, (64, 128), (128, 1))
assert_size_stride(primals_15, (64, ), (1, ))
assert_size_stride(primals_16, (64, 64), (64, 1))
assert_size_stride(primals_17, (64, ), (1, ))
assert_size_stride(primals_18, (64, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_19, (32, ), (1, ))
assert_size_stride(primals_20, (32, 16, 4, 4), (256, 16, 4, 1))
assert_size_stride(primals_21, (16, ), (1, ))
assert_size_stride(primals_22, (16, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_23, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(1, 1), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None)
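        # enc1: floor((256 + 2*1 - 2*(4-1) - 1) / 4) + 1 = 63, giving (4, 8, 63, 63).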
assert_size_stride(buf0, (4, 8, 63, 63), (31752, 3969, 63, 1))
buf1 = empty_strided_cuda((4, 8, 63, 63), (31752, 3969, 63, 1), torch.bool)
buf2 = empty_strided_cuda((4, 8, 63, 63), (31752, 3969, 63, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 127008, grid=grid(127008), stream=stream0)
del buf0
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(3, 3), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 16, 21, 21), (7056, 441, 21, 1))
buf4 = empty_strided_cuda((4, 16, 21, 21), (7056, 441, 21, 1), torch.bool)
buf5 = empty_strided_cuda((4, 16, 21, 21), (7056, 441, 21, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_1.run(buf3, primals_5, buf4, buf5, 28224, grid=grid(28224), stream=stream0)
del buf3
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(3, 3), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 7, 7), (1568, 49, 7, 1))
buf7 = empty_strided_cuda((4, 32, 7, 7), (1568, 49, 7, 1), torch.bool)
buf8 = empty_strided_cuda((4, 32, 7, 7), (1568, 49, 7, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_2.run(buf6, primals_7, buf7, buf8, 6272, grid=grid(6272), stream=stream0)
del buf6
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_8, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
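        # enc4: a 7x7 kernel with stride 2 and no padding collapses the 7x7 map to 1x1.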
assert_size_stride(buf9, (4, 64, 1, 1), (64, 1, 1, 1))
buf10 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 1, 1), torch.bool)
buf11 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_3, x_3, adaptive_avg_pool2d], Original ATen: [aten.convolution, aten.leaky_relu, aten.mean]
triton_poi_fused_convolution_leaky_relu_mean_3.run(buf9, primals_9, buf10, buf11, 256, grid=grid(256), stream=stream0)
del primals_9
buf12 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (4, 64), (64, 1), 0), reinterpret_tensor(primals_10, (64, 128), (1, 64), 0), alpha=1, beta=1, out=buf12)
del primals_11
buf13 = reinterpret_tensor(buf9, (4, 64), (64, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, buf12, reinterpret_tensor(primals_12, (128, 64), (1, 128), 0), alpha=1, beta=1, out=buf13)
del primals_13
buf14 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_var], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_15, buf12, reinterpret_tensor(primals_14, (128, 64), (1, 128), 0), alpha=1, beta=1, out=buf14)
del primals_15
# Topologically Sorted Source Nodes: [eps], Original ATen: [aten.randn_like]
buf15 = torch.ops.aten.randn.default([4, 64], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
buf16 = buf15
del buf15
buf17 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, std, mul_1, sample], Original ATen: [aten.mul, aten.exp, aten.add]
triton_poi_fused_add_exp_mul_4.run(buf13, buf16, buf14, buf17, 256, grid=grid(256), stream=stream0)
buf18 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [z], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_17, buf17, reinterpret_tensor(primals_16, (64, 64), (1, 64), 0), alpha=1, beta=1, out=buf18)
del primals_17
# Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
buf19 = extern_kernels.convolution(reinterpret_tensor(buf18, (4, 64, 1, 1), (64, 1, 1, 1), 0), primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
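        # dec1: transposed conv, (1 - 1)*1 + 4 = 4, giving a 4x4 map with 32 channels.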
assert_size_stride(buf19, (4, 32, 4, 4), (512, 16, 4, 1))
buf20 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
buf21 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv_transpose2d, x_5], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_5.run(buf19, primals_19, buf20, buf21, 2048, grid=grid(2048), stream=stream0)
del buf19
del primals_19
# Topologically Sorted Source Nodes: [conv_transpose2d_1], Original ATen: [aten.convolution]
buf22 = extern_kernels.convolution(buf21, primals_20, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
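        # dec2: transposed conv, (4 - 1)*2 + 4 = 10, giving a 10x10 map with 16 channels.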
assert_size_stride(buf22, (4, 16, 10, 10), (1600, 100, 10, 1))
buf23 = empty_strided_cuda((4, 16, 10, 10), (1600, 100, 10, 1), torch.bool)
buf24 = empty_strided_cuda((4, 16, 10, 10), (1600, 100, 10, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv_transpose2d_1, x_6], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_6.run(buf22, primals_21, buf23, buf24, 6400, grid=grid(6400), stream=stream0)
del buf22
del primals_21
# Topologically Sorted Source Nodes: [conv_transpose2d_2], Original ATen: [aten.convolution]
buf25 = extern_kernels.convolution(buf24, primals_22, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
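        # dec3: transposed conv, (10 - 1)*2 - 2*1 + 5 = 21, giving the final (4, 1, 21, 21) map.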
assert_size_stride(buf25, (4, 1, 21, 21), (441, 441, 21, 1))
buf26 = buf25; del buf25 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d_2, reconstruction], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_7.run(buf26, primals_23, 1764, grid=grid(1764), stream=stream0)
del primals_23
return (buf26, buf13, buf14, primals_1, primals_3, primals_4, primals_6, primals_8, primals_18, primals_20, primals_22, buf1, buf2, buf4, buf5, buf7, buf8, buf10, reinterpret_tensor(buf11, (4, 64), (64, 1), 0), buf12, buf14, buf16, buf17, reinterpret_tensor(buf18, (4, 64, 1, 1), (64, 1, 1, 1), 0), buf20, buf21, buf23, buf24, buf26, primals_16, primals_14, primals_12, primals_10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 256, 256), (196608, 65536, 256, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 8, 4, 4), (128, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 16, 4, 4), (256, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((64, 32, 7, 7), (1568, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((128, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((64, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((64, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((64, 32, 4, 4), (512, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((32, 16, 4, 4), (256, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((16, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import functional as F
class Landsat2ViirsNet(nn.Module):
def __init__(self, latent_dim=64, init_channels=8, kernel_size=4,
image_in_channels=3, image_out_channels=1):
super(Landsat2ViirsNet, self).__init__()
self.enc1 = nn.Conv2d(in_channels=image_in_channels, out_channels=
init_channels, kernel_size=kernel_size, stride=4, padding=1,
dilation=2)
self.enc2 = nn.Conv2d(in_channels=init_channels, out_channels=
init_channels * 2, kernel_size=kernel_size, stride=3, padding=1)
self.enc3 = nn.Conv2d(in_channels=init_channels * 2, out_channels=
init_channels * 4, kernel_size=kernel_size, stride=3, padding=1)
self.enc4 = nn.Conv2d(in_channels=init_channels * 4, out_channels=
64, kernel_size=7, stride=2, padding=0)
self.fc1 = nn.Linear(64, 128)
self.fc_mu = nn.Linear(128, latent_dim)
self.fc_log_var = nn.Linear(128, latent_dim)
self.fc2 = nn.Linear(latent_dim, 64)
self.dec1 = nn.ConvTranspose2d(in_channels=64, out_channels=
init_channels * 4, kernel_size=kernel_size, stride=1, padding=0)
self.dec2 = nn.ConvTranspose2d(in_channels=init_channels * 4,
out_channels=init_channels * 2, kernel_size=kernel_size, stride
=2, padding=0)
self.dec3 = nn.ConvTranspose2d(in_channels=init_channels * 2,
out_channels=image_out_channels, kernel_size=kernel_size + 1,
stride=2, padding=1)
def reparameterize(self, mu, log_var):
"""
:param mu: mean from the encoder's latent space
:param log_var: log variance from the encoder's latent space
"""
std = torch.exp(0.5 * log_var)
eps = torch.randn_like(std)
sample = mu + eps * std
return sample
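    # Shape sketch: mu, log_var, eps and the returned sample all have shape
    # [batch, latent_dim]; drawing eps ~ N(0, I) keeps the sample
    # differentiable w.r.t. mu and log_var (the reparameterization trick).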
def forward(self, x):
x = F.leaky_relu(self.enc1(x))
x = F.leaky_relu(self.enc2(x))
x = F.leaky_relu(self.enc3(x))
x = F.leaky_relu(self.enc4(x))
batch, _, _, _ = x.shape
x = F.adaptive_avg_pool2d(x, 1).reshape(batch, -1)
hidden = self.fc1(x)
mu = self.fc_mu(hidden)
log_var = self.fc_log_var(hidden)
z = self.reparameterize(mu, log_var)
z = self.fc2(z)
z = z.view(-1, 64, 1, 1)
x = F.leaky_relu(self.dec1(z))
x = F.leaky_relu(self.dec2(x))
reconstruction = torch.sigmoid(self.dec3(x))
return reconstruction, mu, log_var
def get_inputs():
return [torch.rand([4, 3, 256, 256])]
def get_init_inputs():
return [[], {}]
| import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 127008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3969 % 8
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 28224
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 441 % 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 6272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 49 % 32
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_mean_3(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = 1.0
tmp9 = tmp7 / tmp8
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_exp_mul_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask)
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 * tmp5
tmp7 = tmp0 + tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
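    # The line above is a constant-true mask left by the code generator; it is
    # unused because all 2048 elements are in range and accesses are unmasked.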
x3 = xindex
x1 = xindex // 16 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 100 % 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_sigmoid_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1764
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22, primals_23
) = args
args.clear()
assert_size_stride(primals_1, (8, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 3, 256, 256), (196608, 65536, 256, 1))
assert_size_stride(primals_4, (16, 8, 4, 4), (128, 16, 4, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (32, 16, 4, 4), (256, 16, 4, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (64, 32, 7, 7), (1568, 49, 7, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (128, 64), (64, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (64, 128), (128, 1))
assert_size_stride(primals_13, (64,), (1,))
assert_size_stride(primals_14, (64, 128), (128, 1))
assert_size_stride(primals_15, (64,), (1,))
assert_size_stride(primals_16, (64, 64), (64, 1))
assert_size_stride(primals_17, (64,), (1,))
assert_size_stride(primals_18, (64, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_19, (32,), (1,))
assert_size_stride(primals_20, (32, 16, 4, 4), (256, 16, 4, 1))
assert_size_stride(primals_21, (16,), (1,))
assert_size_stride(primals_22, (16, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_23, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4,
4), padding=(1, 1), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 63, 63), (31752, 3969, 63, 1))
buf1 = empty_strided_cuda((4, 8, 63, 63), (31752, 3969, 63, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 8, 63, 63), (31752, 3969, 63, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(127008)](buf0,
primals_2, buf1, buf2, 127008, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf0
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(3, 3),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 16, 21, 21), (7056, 441, 21, 1))
buf4 = empty_strided_cuda((4, 16, 21, 21), (7056, 441, 21, 1),
torch.bool)
buf5 = empty_strided_cuda((4, 16, 21, 21), (7056, 441, 21, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_1[grid(28224)](buf3,
primals_5, buf4, buf5, 28224, XBLOCK=256, num_warps=4, num_stages=1
)
del buf3
del primals_5
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(3, 3),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 7, 7), (1568, 49, 7, 1))
buf7 = empty_strided_cuda((4, 32, 7, 7), (1568, 49, 7, 1), torch.bool)
buf8 = empty_strided_cuda((4, 32, 7, 7), (1568, 49, 7, 1), torch.
float32)
triton_poi_fused_convolution_leaky_relu_2[grid(6272)](buf6,
primals_7, buf7, buf8, 6272, XBLOCK=256, num_warps=4, num_stages=1)
del buf6
del primals_7
buf9 = extern_kernels.convolution(buf8, primals_8, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 1, 1), (64, 1, 1, 1))
buf10 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 1, 1), torch.bool)
buf11 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 256, 256), torch.
float32)
triton_poi_fused_convolution_leaky_relu_mean_3[grid(256)](buf9,
primals_9, buf10, buf11, 256, XBLOCK=256, num_warps=4, num_stages=1
)
del primals_9
buf12 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (4, 64),
(64, 1), 0), reinterpret_tensor(primals_10, (64, 128), (1, 64),
0), alpha=1, beta=1, out=buf12)
del primals_11
buf13 = reinterpret_tensor(buf9, (4, 64), (64, 1), 0)
del buf9
extern_kernels.addmm(primals_13, buf12, reinterpret_tensor(
primals_12, (128, 64), (1, 128), 0), alpha=1, beta=1, out=buf13)
del primals_13
buf14 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
extern_kernels.addmm(primals_15, buf12, reinterpret_tensor(
primals_14, (128, 64), (1, 128), 0), alpha=1, beta=1, out=buf14)
del primals_15
buf15 = torch.ops.aten.randn.default([4, 64], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf16 = buf15
del buf15
buf17 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
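        # buf17 = buf13 (mu) + buf16 (eps) * exp(0.5 * buf14 (log_var))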
triton_poi_fused_add_exp_mul_4[grid(256)](buf13, buf16, buf14,
buf17, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf18 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
extern_kernels.addmm(primals_17, buf17, reinterpret_tensor(
primals_16, (64, 64), (1, 64), 0), alpha=1, beta=1, out=buf18)
del primals_17
buf19 = extern_kernels.convolution(reinterpret_tensor(buf18, (4, 64,
1, 1), (64, 1, 1, 1), 0), primals_18, stride=(1, 1), padding=(0,
0), dilation=(1, 1), transposed=True, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf19, (4, 32, 4, 4), (512, 16, 4, 1))
buf20 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
buf21 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.
float32)
triton_poi_fused_convolution_leaky_relu_5[grid(2048)](buf19,
primals_19, buf20, buf21, 2048, XBLOCK=256, num_warps=4,
num_stages=1)
del buf19
del primals_19
buf22 = extern_kernels.convolution(buf21, primals_20, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 16, 10, 10), (1600, 100, 10, 1))
buf23 = empty_strided_cuda((4, 16, 10, 10), (1600, 100, 10, 1),
torch.bool)
buf24 = empty_strided_cuda((4, 16, 10, 10), (1600, 100, 10, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_6[grid(6400)](buf22,
primals_21, buf23, buf24, 6400, XBLOCK=256, num_warps=4,
num_stages=1)
del buf22
del primals_21
buf25 = extern_kernels.convolution(buf24, primals_22, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 1, 21, 21), (441, 441, 21, 1))
buf26 = buf25
del buf25
triton_poi_fused_convolution_sigmoid_7[grid(1764)](buf26,
primals_23, 1764, XBLOCK=256, num_warps=4, num_stages=1)
del primals_23
return (buf26, buf13, buf14, primals_1, primals_3, primals_4, primals_6,
primals_8, primals_18, primals_20, primals_22, buf1, buf2, buf4,
buf5, buf7, buf8, buf10, reinterpret_tensor(buf11, (4, 64), (64, 1),
0), buf12, buf14, buf16, buf17, reinterpret_tensor(buf18, (4, 64, 1,
1), (64, 1, 1, 1), 0), buf20, buf21, buf23, buf24, buf26,
primals_16, primals_14, primals_12, primals_10)
class Landsat2ViirsNetNew(nn.Module):
def __init__(self, latent_dim=64, init_channels=8, kernel_size=4,
image_in_channels=3, image_out_channels=1):
super(Landsat2ViirsNetNew, self).__init__()
self.enc1 = nn.Conv2d(in_channels=image_in_channels, out_channels=
init_channels, kernel_size=kernel_size, stride=4, padding=1,
dilation=2)
self.enc2 = nn.Conv2d(in_channels=init_channels, out_channels=
init_channels * 2, kernel_size=kernel_size, stride=3, padding=1)
self.enc3 = nn.Conv2d(in_channels=init_channels * 2, out_channels=
init_channels * 4, kernel_size=kernel_size, stride=3, padding=1)
self.enc4 = nn.Conv2d(in_channels=init_channels * 4, out_channels=
64, kernel_size=7, stride=2, padding=0)
self.fc1 = nn.Linear(64, 128)
self.fc_mu = nn.Linear(128, latent_dim)
self.fc_log_var = nn.Linear(128, latent_dim)
self.fc2 = nn.Linear(latent_dim, 64)
self.dec1 = nn.ConvTranspose2d(in_channels=64, out_channels=
init_channels * 4, kernel_size=kernel_size, stride=1, padding=0)
self.dec2 = nn.ConvTranspose2d(in_channels=init_channels * 4,
out_channels=init_channels * 2, kernel_size=kernel_size, stride
=2, padding=0)
self.dec3 = nn.ConvTranspose2d(in_channels=init_channels * 2,
out_channels=image_out_channels, kernel_size=kernel_size + 1,
stride=2, padding=1)
def reparameterize(self, mu, log_var):
"""
:param mu: mean from the encoder's latent space
:param log_var: log variance from the encoder's latent space
"""
std = torch.exp(0.5 * log_var)
eps = torch.randn_like(std)
sample = mu + eps * std
return sample
def forward(self, input_0):
primals_1 = self.enc1.weight
primals_2 = self.enc1.bias
primals_4 = self.enc2.weight
primals_5 = self.enc2.bias
primals_6 = self.enc3.weight
primals_7 = self.enc3.bias
primals_8 = self.enc4.weight
primals_9 = self.enc4.bias
primals_10 = self.fc1.weight
primals_11 = self.fc1.bias
primals_12 = self.fc_mu.weight
primals_13 = self.fc_mu.bias
primals_14 = self.fc_log_var.weight
primals_15 = self.fc_log_var.bias
primals_16 = self.fc2.weight
primals_17 = self.fc2.bias
primals_18 = self.dec1.weight
primals_19 = self.dec1.bias
primals_20 = self.dec2.weight
primals_21 = self.dec2.bias
primals_22 = self.dec3.weight
primals_23 = self.dec3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23])
return output[0], output[1], output[2]
| mrmauer/detecting_poverty | Landsat2ViirsNet | false | 7,313 | [
"MIT"
] | 1 | 2c8a28295264674f5bfe06ef1fed6dd8b898b8b5 | https://github.com/mrmauer/detecting_poverty/tree/2c8a28295264674f5bfe06ef1fed6dd8b898b8b5 | import torch
from torch import nn
from torch.nn import functional as F
class Model(nn.Module):
def __init__(self, latent_dim=64, init_channels=8, kernel_size=4,
image_in_channels=3, image_out_channels=1):
super().__init__()
self.enc1 = nn.Conv2d(in_channels=image_in_channels, out_channels=
init_channels, kernel_size=kernel_size, stride=4, padding=1,
dilation=2)
self.enc2 = nn.Conv2d(in_channels=init_channels, out_channels=
init_channels * 2, kernel_size=kernel_size, stride=3, padding=1)
self.enc3 = nn.Conv2d(in_channels=init_channels * 2, out_channels=
init_channels * 4, kernel_size=kernel_size, stride=3, padding=1)
self.enc4 = nn.Conv2d(in_channels=init_channels * 4, out_channels=
64, kernel_size=7, stride=2, padding=0)
self.fc1 = nn.Linear(64, 128)
self.fc_mu = nn.Linear(128, latent_dim)
self.fc_log_var = nn.Linear(128, latent_dim)
self.fc2 = nn.Linear(latent_dim, 64)
self.dec1 = nn.ConvTranspose2d(in_channels=64, out_channels=
init_channels * 4, kernel_size=kernel_size, stride=1, padding=0)
self.dec2 = nn.ConvTranspose2d(in_channels=init_channels * 4,
out_channels=init_channels * 2, kernel_size=kernel_size, stride
=2, padding=0)
self.dec3 = nn.ConvTranspose2d(in_channels=init_channels * 2,
out_channels=image_out_channels, kernel_size=kernel_size + 1,
stride=2, padding=1)
def reparameterize(self, mu, log_var):
"""
:param mu: mean from the encoder's latent space
:param log_var: log variance from the encoder's latent space
"""
std = torch.exp(0.5 * log_var)
eps = torch.randn_like(std)
sample = mu + eps * std
return sample
def forward(self, x):
x = F.leaky_relu(self.enc1(x))
x = F.leaky_relu(self.enc2(x))
x = F.leaky_relu(self.enc3(x))
x = F.leaky_relu(self.enc4(x))
batch, _, _, _ = x.shape
x = F.adaptive_avg_pool2d(x, 1).reshape(batch, -1)
hidden = self.fc1(x)
mu = self.fc_mu(hidden)
log_var = self.fc_log_var(hidden)
z = self.reparameterize(mu, log_var)
z = self.fc2(z)
z = z.view(-1, 64, 1, 1)
x = F.leaky_relu(self.dec1(z))
x = F.leaky_relu(self.dec2(x))
reconstruction = torch.sigmoid(self.dec3(x))
return reconstruction, mu, log_var
def get_inputs():
return [torch.rand([4, 3, 256, 256])]
def get_init_inputs():
return []
|
VertexDirectEmbedder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/xq/cxqinuparlha25j4geyv6tolvpah7qdqdkpecjesyn3kblysszql.py
# Topologically Sorted Source Nodes: [norm, clamp, truediv], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div]
# Source node to ATen node mapping:
# clamp => clamp_min
# norm => pow_1, pow_2, sum_1
# truediv => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2.0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %clamp_min), kwargs = {})
triton_poi_fused_clamp_div_linalg_vector_norm_0 = async_compile.triton('triton_poi_fused_clamp_div_linalg_vector_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
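    # Row-wise L2 normalization of a (4, 4) matrix: each element x2 is divided
    # by the norm of its row x1, clamped below at 1e-06 to avoid division by zero.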
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm, clamp, truediv], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_clamp_div_linalg_vector_norm_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
from torch import nn
import pickle
# NOTE: load() below also references PathManager; depending on the detectron2
# version this is exposed by fvcore.common.file_io or detectron2.utils.file_io.
def normalize_embeddings(embeddings: 'torch.Tensor', epsilon: 'float'=1e-06
) ->torch.Tensor:
"""
Normalize N D-dimensional embedding vectors arranged in a tensor [N, D]
Args:
embeddings (tensor [N, D]): N D-dimensional embedding vectors
epsilon (float): minimum value for a vector norm
Return:
Normalized embeddings (tensor [N, D]), such that L2 vector norms are all equal to 1.
"""
return embeddings / torch.clamp(embeddings.norm(p=None, dim=1, keepdim=
True), min=epsilon)
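# Worked example: for embeddings = torch.ones(2, 4) every row has L2 norm
# sqrt(4) = 2, so each entry of the normalized result is 0.5.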
class VertexDirectEmbedder(nn.Module):
"""
Class responsible for embedding vertices. Vertex embeddings take
the form of a tensor of size [N, D], where
N = number of vertices
D = number of dimensions in the embedding space
"""
def __init__(self, num_vertices: 'int', embed_dim: 'int'):
"""
Initialize embedder, set random embeddings
Args:
num_vertices (int): number of vertices to embed
embed_dim (int): number of dimensions in the embedding space
"""
super(VertexDirectEmbedder, self).__init__()
self.embeddings = nn.Parameter(torch.Tensor(num_vertices, embed_dim))
self.reset_parameters()
@torch.no_grad()
def reset_parameters(self):
"""
Reset embeddings to random values
"""
torch.nn.init.uniform_(self.embeddings, a=-0.5, b=0.5)
def forward(self) ->torch.Tensor:
"""
Produce vertex embeddings, a tensor of shape [N, D] where:
N = number of vertices
D = number of dimensions in the embedding space
Return:
Full vertex embeddings, a tensor of shape [N, D]
"""
return normalize_embeddings(self.embeddings)
@torch.no_grad()
def load(self, fpath: 'str'):
"""
Load data from a file
Args:
fpath (str): file path to load data from
"""
with PathManager.open(fpath, 'rb') as hFile:
data = pickle.load(hFile)
for name in ['embeddings']:
if name in data:
getattr(self, name).copy_(torch.tensor(data[name]).float())
def get_inputs():
return []
def get_init_inputs():
return [[], {'num_vertices': 4, 'embed_dim': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
from torch import nn
import pickle
# NOTE: load() below also references PathManager; depending on the detectron2
# version this is exposed by fvcore.common.file_io or detectron2.utils.file_io.
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_linalg_vector_norm_0[grid(16)](primals_1,
buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
return buf0, primals_1
def normalize_embeddings(embeddings: 'torch.Tensor', epsilon: 'float'=1e-06
) ->torch.Tensor:
"""
Normalize N D-dimensional embedding vectors arranged in a tensor [N, D]
Args:
embeddings (tensor [N, D]): N D-dimensional embedding vectors
epsilon (float): minimum value for a vector norm
Return:
Normalized embeddings (tensor [N, D]), such that L2 vector norms are all equal to 1.
"""
return embeddings / torch.clamp(embeddings.norm(p=None, dim=1, keepdim=
True), min=epsilon)
class VertexDirectEmbedderNew(nn.Module):
"""
Class responsible for embedding vertices. Vertex embeddings take
the form of a tensor of size [N, D], where
N = number of vertices
D = number of dimensions in the embedding space
"""
def __init__(self, num_vertices: 'int', embed_dim: 'int'):
"""
Initialize embedder, set random embeddings
Args:
num_vertices (int): number of vertices to embed
embed_dim (int): number of dimensions in the embedding space
"""
super(VertexDirectEmbedderNew, self).__init__()
self.embeddings = nn.Parameter(torch.Tensor(num_vertices, embed_dim))
self.reset_parameters()
@torch.no_grad()
def reset_parameters(self):
"""
Reset embeddings to random values
"""
torch.nn.init.uniform_(self.embeddings, a=-0.5, b=0.5)
@torch.no_grad()
def load(self, fpath: 'str'):
"""
Load data from a file
Args:
fpath (str): file path to load data from
"""
with PathManager.open(fpath, 'rb') as hFile:
data = pickle.load(hFile)
for name in ['embeddings']:
if name in data:
getattr(self, name).copy_(torch.tensor(data[name]).float())
def forward(self):
primals_1 = self.embeddings
output = call([primals_1])
return output[0]
| nationaldronesau/detectron2 | VertexDirectEmbedder | false | 7,314 | [
"Apache-2.0"
] | 1 | 6afaee60eb6e0032b5b2edfbec1179f7e7b7b75f | https://github.com/nationaldronesau/detectron2/tree/6afaee60eb6e0032b5b2edfbec1179f7e7b7b75f | import torch
import torch.utils.data
from torch import nn
import pickle
# NOTE: load() below also references PathManager; depending on the detectron2
# version this is exposed by fvcore.common.file_io or detectron2.utils.file_io.
def normalize_embeddings(embeddings: 'torch.Tensor', epsilon: 'float'=1e-06
) ->torch.Tensor:
"""
Normalize N D-dimensional embedding vectors arranged in a tensor [N, D]
Args:
embeddings (tensor [N, D]): N D-dimensional embedding vectors
epsilon (float): minimum value for a vector norm
Return:
Normalized embeddings (tensor [N, D]), such that L2 vector norms are all equal to 1.
"""
return embeddings / torch.clamp(embeddings.norm(p=None, dim=1, keepdim=
True), min=epsilon)
class Model(nn.Module):
"""
Class responsible for embedding vertices. Vertex embeddings take
the form of a tensor of size [N, D], where
N = number of vertices
D = number of dimensions in the embedding space
"""
def __init__(self, num_vertices: 'int', embed_dim: 'int'):
"""
Initialize embedder, set random embeddings
Args:
num_vertices (int): number of vertices to embed
embed_dim (int): number of dimensions in the embedding space
"""
super().__init__()
self.embeddings = nn.Parameter(torch.Tensor(num_vertices, embed_dim))
self.reset_parameters()
@torch.no_grad()
def reset_parameters(self):
"""
Reset embeddings to random values
"""
torch.nn.init.uniform_(self.embeddings, a=-0.5, b=0.5)
def forward(self) ->torch.Tensor:
"""
Produce vertex embeddings, a tensor of shape [N, D] where:
N = number of vertices
D = number of dimensions in the embedding space
Return:
Full vertex embeddings, a tensor of shape [N, D]
"""
return normalize_embeddings(self.embeddings)
@torch.no_grad()
def load(self, fpath: 'str'):
"""
Load data from a file
Args:
fpath (str): file path to load data from
"""
with PathManager.open(fpath, 'rb') as hFile:
data = pickle.load(hFile)
for name in ['embeddings']:
if name in data:
getattr(self, name).copy_(torch.tensor(data[name]).float())
def get_inputs():
return []
def get_init_inputs():
return [4, 4]
|
xTanH | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/5f/c5fec27nvlh3c6rdlcdnvokq2ou7ezxyojlesd7cvhvav5dyuta3.py
# Topologically Sorted Source Nodes: [tanh, sub], Original ATen: [aten.tanh, aten.sub]
# Source node to ATen node mapping:
# sub => sub
# tanh => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%arg0_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %tanh), kwargs = {})
triton_poi_fused_sub_tanh_0 = async_compile.triton('triton_poi_fused_sub_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sub_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = tmp0 - tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tanh, sub], Original ATen: [aten.tanh, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_sub_tanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn
class xTanH(torch.nn.Module):
def forward(self, x: 'torch.Tensor'):
return x - torch.tanh(x)
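# Small-x sanity check (illustration only): tanh(x) ~ x - x**3 / 3, so
# x - tanh(x) ~ x**3 / 3 for small x.
if __name__ == '__main__':
    x = torch.tensor(0.1)
    assert torch.isclose(xTanH()(x), x ** 3 / 3, atol=1e-6)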
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_sub_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = tmp0 - tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
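# xmask disables lanes whose index reaches xnumel (256 = 4*4*4*4 elements),
# so the load and store stay in bounds for any XBLOCK choice.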
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sub_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class xTanHNew(torch.nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| nayyarv/bayesnets | xTanH | false | 7,315 | [
"MIT"
] | 1 | 090abd1a0a91c2b9d6d57a182ee5be1f65a22e11 | https://github.com/nayyarv/bayesnets/tree/090abd1a0a91c2b9d6d57a182ee5be1f65a22e11 | import torch
import torch.nn
class Model(torch.nn.Module):
def forward(self, x: 'torch.Tensor'):
return x - torch.tanh(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
LRN | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/5u/c5u7jgtbilvlp5oee3xzrm3bmkras7mb3t7afiv32pyh7z4mtdml.py
# Topologically Sorted Source Nodes: [div, div_1, mul, add, div_2, x], Original ATen: [aten.pow, aten.avg_pool2d, aten.mul, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# div => pow_1
# div_1 => avg_pool2d
# div_2 => pow_2
# mul => mul
# x => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%pow_1, [1, 1], [1, 1]), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%avg_pool2d, 1.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1.0), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.75), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %pow_2), kwargs = {})
triton_poi_fused_add_avg_pool2d_div_mul_pow_0 = async_compile.triton('triton_poi_fused_add_avg_pool2d_div_mul_pow_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_avg_pool2d_div_mul_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_avg_pool2d_div_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0 * tmp0
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 + tmp2
tmp6 = 0.75
tmp7 = libdevice.pow(tmp5, tmp6)
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [div, div_1, mul, add, div_2, x], Original ATen: [aten.pow, aten.avg_pool2d, aten.mul, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_avg_pool2d_div_mul_pow_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LRN(nn.Module):
def __init__(self, local_size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=
False):
super(LRN, self).__init__()
self.ACROSS_CHANNELS = ACROSS_CHANNELS
if self.ACROSS_CHANNELS:
self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
stride=1, padding=(int((local_size - 1.0) / 2), 0, 0))
else:
self.average = nn.AvgPool2d(kernel_size=local_size, stride=1,
padding=int((local_size - 1.0) / 2))
self.alpha = alpha
self.beta = beta
def forward(self, x):
if self.ACROSS_CHANNELS:
div = x.pow(2).unsqueeze(1)
div = self.average(div).squeeze(1)
div = div.mul(self.alpha).add(1.0).pow(self.beta)
else:
div = x.pow(2)
div = self.average(div)
div = div.mul(self.alpha).add(1.0).pow(self.beta)
x = x.div(div)
return x
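# Worked check (illustration only): with the defaults local_size=1,
# alpha=1.0, beta=0.75, an all-ones input maps to 1 / (1 + 1) ** 0.75,
# roughly 0.5946.
if __name__ == '__main__':
    out = LRN()(torch.ones(1, 1, 2, 2))
    assert torch.allclose(out, torch.full((1, 1, 2, 2), 2 ** -0.75))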
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_avg_pool2d_div_mul_pow_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 + tmp2
tmp6 = 0.75
tmp7 = libdevice.pow(tmp5, tmp6)
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_avg_pool2d_div_mul_pow_0[grid(256)](arg0_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
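# Note: the whole chain (pow, 1x1 avg_pool2d, mul, add, pow, div) lowers to
# the single pointwise kernel above because a 1x1 pooling window with stride
# 1 is elementwise.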
class LRNNew(nn.Module):
def __init__(self, local_size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=
False):
super(LRNNew, self).__init__()
self.ACROSS_CHANNELS = ACROSS_CHANNELS
if self.ACROSS_CHANNELS:
self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
stride=1, padding=(int((local_size - 1.0) / 2), 0, 0))
else:
self.average = nn.AvgPool2d(kernel_size=local_size, stride=1,
padding=int((local_size - 1.0) / 2))
self.alpha = alpha
self.beta = beta
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| nbswords/Paper-implemention-by-Pytorch | LRN | false | 7,316 | [
"MIT"
] | 1 | 429514c4f51c41ec7b3013683fb79ad4b4ab4638 | https://github.com/nbswords/Paper-implemention-by-Pytorch/tree/429514c4f51c41ec7b3013683fb79ad4b4ab4638 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, local_size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=
False):
super().__init__()
self.ACROSS_CHANNELS = ACROSS_CHANNELS
if self.ACROSS_CHANNELS:
self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
stride=1, padding=(int((local_size - 1.0) / 2), 0, 0))
else:
self.average = nn.AvgPool2d(kernel_size=local_size, stride=1,
padding=int((local_size - 1.0) / 2))
self.alpha = alpha
self.beta = beta
def forward(self, x):
if self.ACROSS_CHANNELS:
div = x.pow(2).unsqueeze(1)
div = self.average(div).squeeze(1)
div = div.mul(self.alpha).add(1.0).pow(self.beta)
else:
div = x.pow(2)
div = self.average(div)
div = div.mul(self.alpha).add(1.0).pow(self.beta)
x = x.div(div)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
FocalLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# cross_entropy => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/e4/ce4mrsutak55hdxdhpgam7tb7jmol4afzeboi3c3ftdbbg56ulio.py
# Topologically Sorted Source Nodes: [cross_entropy, neg, p, sub, pow_1, loss, mean], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.exp, aten.rsub, aten.pow, aten.mean]
# Source node to ATen node mapping:
# cross_entropy => exp, log, mul, neg, sub_1, sum_1, sum_2
# loss => mul_1
# mean => mean
# neg => neg_1
# p => exp_1
# pow_1 => pow_1
# sub => sub_2
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %neg : [num_users=2] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%neg,), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %exp_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %neg), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_1,), kwargs = {})
triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1 = async_compile.triton('triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp8 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp13 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp16 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp20 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp24 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp14 = tmp12 * tmp13
tmp15 = tmp2 - tmp11
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp5 - tmp11
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp8 - tmp11
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = -tmp26
tmp28 = -tmp27
tmp29 = tl_math.exp(tmp28)
tmp30 = 1.0
tmp31 = tmp30 - tmp29
tmp32 = libdevice.sqrt(tmp31)
tmp33 = tmp32 * tmp27
tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
tmp36 = tl.sum(tmp34, 1)[:, None]
tmp37 = 64.0
tmp38 = tmp36 / tmp37
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp38, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [cross_entropy, neg, p, sub, pow_1, loss, mean], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.exp, aten.rsub, aten.pow, aten.mean]
triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1.run(buf3, buf0, arg0_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del buf0
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
def focal_loss(input_values, gamma=10):
"""Computes the focal loss"""
p = torch.exp(-input_values)
loss = (1 - p) ** gamma * input_values
return loss.mean()
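# Quick numeric check (illustration, not from the source): a cross entropy of
# ln(2) gives p = 0.5, so with gamma = 0.5 the loss is sqrt(0.5) * ln(2),
# roughly 0.490.
if __name__ == '__main__':
    ce = torch.log(torch.tensor([2.0]))
    assert torch.allclose(focal_loss(ce, gamma=0.5), (0.5 ** 0.5) * ce.mean())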
class FocalLoss(nn.Module):
def __init__(self, weight=None, gamma=0.5):
super(FocalLoss, self).__init__()
assert gamma >= 0
self.gamma = gamma
self.weight = weight
def forward(self, input, target):
return focal_loss(F.cross_entropy(input, target, reduction='none',
weight=self.weight), self.gamma)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
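# Numerically stable log-softmax prologue: the per-position max over the 4
# class slices is subtracted before exponentiation so tl_math.exp cannot
# overflow.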
@triton.jit
def triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
    # xnumel == 1 here: a single program runs, so no x offset or mask is
    # needed.
    rindex = tl.arange(0, RBLOCK)[None, :]
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp13 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp16 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp20 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp24 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp14 = tmp12 * tmp13
tmp15 = tmp2 - tmp11
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp5 - tmp11
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp8 - tmp11
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = -tmp26
tmp28 = -tmp27
tmp29 = tl_math.exp(tmp28)
tmp30 = 1.0
tmp31 = tmp30 - tmp29
tmp32 = libdevice.sqrt(tmp31)
tmp33 = tmp32 * tmp27
tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
tmp36 = tl.sum(tmp34, 1)[:, None]
tmp37 = 64.0
tmp38 = tmp36 / tmp37
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp38, None)
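# This persistent reduction fuses the rest of the loss: the log-sum-exp
# completes log-softmax, the 4 class slices give a soft cross entropy ce,
# the focal term sqrt(1 - exp(-ce)) * ce applies gamma = 0.5, and the sum
# over all 64 positions divided by 64.0 yields the mean.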
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1[grid(1)](
buf3, buf0, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del buf0
return buf3,
def focal_loss(input_values, gamma=10):
"""Computes the focal loss"""
p = torch.exp(-input_values)
loss = (1 - p) ** gamma * input_values
return loss.mean()
class FocalLossNew(nn.Module):
def __init__(self, weight=None, gamma=0.5):
super(FocalLossNew, self).__init__()
assert gamma >= 0
self.gamma = gamma
self.weight = weight
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| naver-ai/cgl_fairness | FocalLoss | false | 7,317 | [
"MIT"
] | 1 | 00d3bec233c9b3e0f88496118abaed8321ca3159 | https://github.com/naver-ai/cgl_fairness/tree/00d3bec233c9b3e0f88496118abaed8321ca3159 | import torch
import torch.nn as nn
import torch.nn.functional as F
def focal_loss(input_values, gamma=10):
"""Computes the focal loss"""
p = torch.exp(-input_values)
loss = (1 - p) ** gamma * input_values
return loss.mean()
class Model(nn.Module):
def __init__(self, weight=None, gamma=0.5):
super().__init__()
assert gamma >= 0
self.gamma = gamma
self.weight = weight
def forward(self, input, target):
return focal_loss(F.cross_entropy(input, target, reduction='none',
weight=self.weight), self.gamma)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MultiHeadSelfAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/dk/cdk4odz276xorciau5ehgl7f3s2mgkf3hrye6xep6kzubczdeqqy.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7q/c7qatvt7mqunxr7uiwohrunnhsrswmwm2muzdnbh6g5mxf3pbhas.py
# Topologically Sorted Source Nodes: [attention_mask, attention_mask_1, truediv, attention_score, attention_score_1], Original ATen: [aten.full, aten.triu, aten.div, aten.add, aten._softmax]
# Source node to ATen node mapping:
# attention_mask => full_default
# attention_mask_1 => full_default_1, ge, sub, where
# attention_score => add
# attention_score_1 => amax, exp, sub_1, sum_1
# truediv => div
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%sub, 1), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%ge, %full_default, %full_default_1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %where), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_add_div_full_triu_1 = async_compile.triton('triton_poi_fused__softmax_add_div_full_triu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_div_full_triu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_div_full_triu_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = (-1)*x0
tmp4 = tl.full([1], 1, tl.int64)
tmp5 = tmp3 >= tmp4
tmp6 = float("-inf")
tmp7 = 0.0
tmp8 = tl.where(tmp5, tmp6, tmp7)
tmp9 = tmp2 + tmp8
tmp11 = tmp10 * tmp1
tmp12 = 1 + ((-1)*x0)
tmp13 = tmp12 >= tmp4
tmp14 = tl.where(tmp13, tmp6, tmp7)
tmp15 = tmp11 + tmp14
tmp16 = triton_helpers.maximum(tmp9, tmp15)
tmp18 = tmp17 * tmp1
tmp19 = 2 + ((-1)*x0)
tmp20 = tmp19 >= tmp4
tmp21 = tl.where(tmp20, tmp6, tmp7)
tmp22 = tmp18 + tmp21
tmp23 = triton_helpers.maximum(tmp16, tmp22)
tmp25 = tmp24 * tmp1
tmp26 = 3 + ((-1)*x0)
tmp27 = tmp26 >= tmp4
tmp28 = tl.where(tmp27, tmp6, tmp7)
tmp29 = tmp25 + tmp28
tmp30 = triton_helpers.maximum(tmp23, tmp29)
tmp31 = tmp9 - tmp30
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp15 - tmp30
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp32 + tmp34
tmp36 = tmp22 - tmp30
tmp37 = tl_math.exp(tmp36)
tmp38 = tmp35 + tmp37
tmp39 = tmp29 - tmp30
tmp40 = tl_math.exp(tmp39)
tmp41 = tmp38 + tmp40
tl.store(out_ptr0 + (x2), tmp30, xmask)
tl.store(out_ptr1 + (x2), tmp41, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7x/c7xn7qjh34kwi2jkyz5l7lkdaajxitzvw2a7xfapjnaswkkx3zrk.py
# Topologically Sorted Source Nodes: [attention_mask, attention_mask_1, truediv, attention_score, attention_score_1], Original ATen: [aten.full, aten.triu, aten.div, aten.add, aten._softmax]
# Source node to ATen node mapping:
# attention_mask => full_default
# attention_mask_1 => full_default_1, ge, sub, where
# attention_score => add
# attention_score_1 => div_1, exp, sub_1
# truediv => div
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%sub, 1), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%ge, %full_default, %full_default_1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %where), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_add_div_full_triu_2 = async_compile.triton('triton_poi_fused__softmax_add_div_full_triu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_div_full_triu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_div_full_triu_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x1 = (xindex // 4) % 4
x4 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp10 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = x0 + ((-1)*x1)
tmp4 = tl.full([1], 1, tl.int64)
tmp5 = tmp3 >= tmp4
tmp6 = float("-inf")
tmp7 = 0.0
tmp8 = tl.where(tmp5, tmp6, tmp7)
tmp9 = tmp2 + tmp8
tmp11 = tmp9 - tmp10
tmp12 = tl_math.exp(tmp11)
tmp14 = tmp12 / tmp13
tl.store(in_out_ptr0 + (x3), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xt/cxtkkmujo4ytg6ycpz5lk5livtstr63pg5nsf5ijewjbtrfrqx6k.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_6,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_3, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [attention_mask, attention_mask_1, truediv, attention_score, attention_score_1], Original ATen: [aten.full, aten.triu, aten.div, aten.add, aten._softmax]
triton_poi_fused__softmax_add_div_full_triu_1.run(buf5, buf6, buf7, 64, grid=grid(64), stream=stream0)
buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [attention_mask, attention_mask_1, truediv, attention_score, attention_score_1], Original ATen: [aten.full, aten.triu, aten.div, aten.add, aten._softmax]
triton_poi_fused__softmax_add_div_full_triu_2.run(buf8, buf6, buf7, 256, grid=grid(256), stream=stream0)
buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf2, primals_7, buf9, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_7
buf10 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf10, buf11, 16, 4, grid=grid(16, 4), stream=stream0)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [score_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12)
del primals_9
return (reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), primals_8, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
import torch.distributed
import torch.nn.functional as F
import torch.nn as nn
class MultiHeadSelfAttention(nn.Module):
def __init__(self, config):
super(MultiHeadSelfAttention, self).__init__()
self.query = nn.Linear(config.hidden_size, config.hidden_size)
self.key = nn.Linear(config.hidden_size, config.hidden_size)
self.value = nn.Linear(config.hidden_size, config.hidden_size)
self.attn_drop = nn.Dropout(config.attn_pdrop)
self.resid_drop = nn.Dropout(config.resid_pdrop)
self.proj = nn.Linear(config.hidden_size, config.hidden_size)
        assert config.hidden_size % config.n_heads == 0, 'Hidden size should be a multiple of n_heads'
self.n_heads = config.n_heads
self.head_size = config.hidden_size // self.n_heads
def forward(self, x):
batch_size, seq_length, hidden_size = x.size()
q = self.query(x).view(batch_size, seq_length, self.n_heads, self.
head_size).transpose(1, 2)
k = self.key(x).view(batch_size, seq_length, self.head_size, self.
n_heads).transpose(1, 3)
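        # Note: k is viewed as (B, S, head_size, n_heads) and transposed to
        # (B, n_heads, head_size, S), a strided head split that differs from
        # the contiguous split used for q and v; the shapes still line up
        # for matmul(q, k) below.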
v = self.value(x).view(batch_size, seq_length, self.n_heads, self.
head_size).transpose(1, 2)
attention_mask = torch.full((seq_length, seq_length), -float('inf'),
device=x.device, dtype=x.dtype)
attention_mask = torch.triu(attention_mask, diagonal=1)
attention_score = torch.matmul(q, k) / math.sqrt(self.head_size
) + attention_mask
attention_score = F.softmax(attention_score, dim=-1)
attention_score = self.attn_drop(attention_score)
score = torch.matmul(attention_score, v)
score = score.transpose(1, 2).contiguous().view(batch_size,
seq_length, hidden_size)
score = self.proj(score)
score = self.resid_drop(score)
return score
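# For seq_length = 4 the causal mask built in forward() is
#   [[0., -inf, -inf, -inf],
#    [0.,   0., -inf, -inf],
#    [0.,   0.,   0., -inf],
#    [0.,   0.,   0.,   0.]]
# so each position attends only to itself and earlier positions.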
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, attn_pdrop=0.5,
resid_pdrop=0.5, n_heads=4)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.distributed
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_add_div_full_triu_1(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp24 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = -1 * x0
tmp4 = tl.full([1], 1, tl.int64)
tmp5 = tmp3 >= tmp4
tmp6 = float('-inf')
tmp7 = 0.0
tmp8 = tl.where(tmp5, tmp6, tmp7)
tmp9 = tmp2 + tmp8
tmp11 = tmp10 * tmp1
tmp12 = 1 + -1 * x0
tmp13 = tmp12 >= tmp4
tmp14 = tl.where(tmp13, tmp6, tmp7)
tmp15 = tmp11 + tmp14
tmp16 = triton_helpers.maximum(tmp9, tmp15)
tmp18 = tmp17 * tmp1
tmp19 = 2 + -1 * x0
tmp20 = tmp19 >= tmp4
tmp21 = tl.where(tmp20, tmp6, tmp7)
tmp22 = tmp18 + tmp21
tmp23 = triton_helpers.maximum(tmp16, tmp22)
tmp25 = tmp24 * tmp1
tmp26 = 3 + -1 * x0
tmp27 = tmp26 >= tmp4
tmp28 = tl.where(tmp27, tmp6, tmp7)
tmp29 = tmp25 + tmp28
tmp30 = triton_helpers.maximum(tmp23, tmp29)
tmp31 = tmp9 - tmp30
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp15 - tmp30
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp32 + tmp34
tmp36 = tmp22 - tmp30
tmp37 = tl_math.exp(tmp36)
tmp38 = tmp35 + tmp37
tmp39 = tmp29 - tmp30
tmp40 = tl_math.exp(tmp39)
tmp41 = tmp38 + tmp40
tl.store(out_ptr0 + x2, tmp30, xmask)
tl.store(out_ptr1 + x2, tmp41, xmask)
@triton.jit
def triton_poi_fused__softmax_add_div_full_triu_2(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x1 = xindex // 4 % 4
x4 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp10 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = x0 + -1 * x1
tmp4 = tl.full([1], 1, tl.int64)
tmp5 = tmp3 >= tmp4
tmp6 = float('-inf')
tmp7 = 0.0
tmp8 = tl.where(tmp5, tmp6, tmp7)
tmp9 = tmp2 + tmp8
tmp11 = tmp9 - tmp10
tmp12 = tl_math.exp(tmp11)
tmp14 = tmp12 / tmp13
tl.store(in_out_ptr0 + x3, tmp14, xmask)
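# Hedged torch reference for the two softmax kernels above: together they
# compute a numerically stable softmax over (scores * 1.0 + causal mask),
# where the 1.0 factor is 1 / sqrt(head_size) with head_size == 1. Kernel 1
# emits the per-row max and sum(exp(x - max)); kernel 2 normalizes in place.
# `_ref_masked_softmax` is illustrative only.
def _ref_masked_softmax(scores):
    mask = torch.triu(torch.full_like(scores, float('-inf')), diagonal=1)
    z = scores + mask
    m = z.max(dim=-1, keepdim=True).values
    e = torch.exp(z - m)
    return e / e.sum(dim=-1, keepdim=True)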
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf1
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_div_full_triu_1[grid(64)](buf5, buf6,
buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_add_div_full_triu_2[grid(256)](buf8, buf6,
buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf7
triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_7, buf9, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf10 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf6
triton_poi_fused_clone_3[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0)
del buf10
extern_kernels.addmm(primals_9, reinterpret_tensor(buf11, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_9
return reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0
), primals_8, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
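# Note: call() returns the attention output first; the remaining entries are
# re-strided aliases (reinterpret_tensor makes no copy) of intermediate
# buffers kept for the backward pass. Buffer storage is also recycled, e.g.
# buf0 (the q matmul output) is reused as buf4 to hold the permuted k.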
class MultiHeadSelfAttentionNew(nn.Module):
def __init__(self, config):
super(MultiHeadSelfAttentionNew, self).__init__()
self.query = nn.Linear(config.hidden_size, config.hidden_size)
self.key = nn.Linear(config.hidden_size, config.hidden_size)
self.value = nn.Linear(config.hidden_size, config.hidden_size)
self.attn_drop = nn.Dropout(config.attn_pdrop)
self.resid_drop = nn.Dropout(config.resid_pdrop)
self.proj = nn.Linear(config.hidden_size, config.hidden_size)
        assert config.hidden_size % config.n_heads == 0, 'Hidden size should be a multiple of n_heads'
self.n_heads = config.n_heads
self.head_size = config.hidden_size // self.n_heads
def forward(self, input_0):
primals_2 = self.query.weight
primals_3 = self.query.bias
primals_4 = self.key.weight
primals_5 = self.key.bias
primals_6 = self.value.weight
primals_7 = self.value.bias
primals_8 = self.proj.weight
primals_9 = self.proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
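# Hedged usage sketch for the compiled wrapper (CUDA required; mirrors
# get_init_inputs() earlier in this row, names illustrative):
#   cfg = _mock_config(hidden_size=4, attn_pdrop=0.5, resid_pdrop=0.5, n_heads=4)
#   m = MultiHeadSelfAttentionNew(cfg).cuda()
#   out = m(torch.rand(4, 4, 4, device='cuda'))  # -> (4, 4, 4)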
| myoons/image-gpt-pytorch | MultiHeadSelfAttention | false | 7,318 | [
"Apache-2.0"
] | 1 | d05081250d01ce208796dfb246ea1c9a093237c5 | https://github.com/myoons/image-gpt-pytorch/tree/d05081250d01ce208796dfb246ea1c9a093237c5 | from _paritybench_helpers import _mock_config
import math
import torch
import torch.distributed
import torch.nn.functional as F
import torch.nn as nn
class Model(nn.Module):
def __init__(self, config):
super().__init__()
self.query = nn.Linear(config.hidden_size, config.hidden_size)
self.key = nn.Linear(config.hidden_size, config.hidden_size)
self.value = nn.Linear(config.hidden_size, config.hidden_size)
self.attn_drop = nn.Dropout(config.attn_pdrop)
self.resid_drop = nn.Dropout(config.resid_pdrop)
self.proj = nn.Linear(config.hidden_size, config.hidden_size)
        assert config.hidden_size % config.n_heads == 0, 'Hidden size should be a multiple of n_heads'
self.n_heads = config.n_heads
self.head_size = config.hidden_size // self.n_heads
def forward(self, x):
batch_size, seq_length, hidden_size = x.size()
        q = self.query(x).view(batch_size, seq_length, self.n_heads,
            self.head_size).transpose(1, 2)
        k = self.key(x).view(batch_size, seq_length, self.head_size,
            self.n_heads).transpose(1, 3)
        v = self.value(x).view(batch_size, seq_length, self.n_heads,
            self.head_size).transpose(1, 2)
        attention_mask = torch.full((seq_length, seq_length), -float('inf'),
            device=x.device, dtype=x.dtype)
        attention_mask = torch.triu(attention_mask, diagonal=1)
        attention_score = (torch.matmul(q, k) / math.sqrt(self.head_size) +
            attention_mask)
attention_score = F.softmax(attention_score, dim=-1)
attention_score = self.attn_drop(attention_score)
score = torch.matmul(attention_score, v)
score = score.transpose(1, 2).contiguous().view(batch_size,
seq_length, hidden_size)
score = self.proj(score)
score = self.resid_drop(score)
return score
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, attn_pdrop=0.5,
resid_pdrop=0.5, n_heads=4)}]
|
Matcher | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ht/chthqmd7ami6gjlbitivlw6j42l7ccuvzbocn5fett3pybu6vkio.py
# Topologically Sorted Source Nodes: [res], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# res => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex % 16
x0 = xindex % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x4 + (16*x3) + (64*x2)), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x5), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2w/c2we4npvhka5swgyfnb3e645i3kzleot2woxqx4zqghwchtmbg4e.py
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
# Source node to ATen node mapping:
# truediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_6, 2.0), kwargs = {})
triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tx], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [res], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_2, buf2, 256, grid=grid(256), stream=stream0)
del primals_2
buf3 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [res], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
triton_poi_fused_div_1.run(buf4, 256, grid=grid(256), stream=stream0)
return (buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
class Matcher(nn.Module):
"""
Matching between a pair of nodes to conduct link prediction.
    Uses multi-head attention as the matching model.
"""
def __init__(self, n_hid):
super(Matcher, self).__init__()
self.left_linear = nn.Linear(n_hid, n_hid)
self.right_linear = nn.Linear(n_hid, n_hid)
self.sqrt_hd = math.sqrt(n_hid)
        self.n_hid = n_hid  # recorded for __repr__
        self.cache = None
def forward(self, x, y, infer=False, pair=False):
ty = self.right_linear(y)
if infer:
"""
            During testing, we will consider millions or even billions of nodes as candidates (x).
            It is not feasible to recompute them for each different query (y).
            Since the model is fixed, we propose to cache them and directly reuse the results.
"""
if self.cache is not None:
tx = self.cache
else:
tx = self.left_linear(x)
self.cache = tx
else:
tx = self.left_linear(x)
if pair:
res = (tx * ty).sum(dim=-1)
else:
res = torch.matmul(tx, ty.transpose(0, 1))
return res / self.sqrt_hd
def __repr__(self):
return '{}(n_hid={})'.format(self.__class__.__name__, self.n_hid)
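# Hedged example of the two scoring modes in Matcher.forward (illustrative
# helper, not part of the original module):
def _demo_matcher_modes(n_hid=4):
    m = Matcher(n_hid)
    x, y = torch.rand(8, n_hid), torch.rand(8, n_hid)
    pair_scores = m(x, y, pair=True)   # (8,): one score per aligned (x, y) row
    all_scores = m(x, y, pair=False)   # (8, 8): every x against every y
    return pair_scores, all_scores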
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_hid': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex % 16
x0 = xindex % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3 + 64 * x2), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x5, tmp2, xmask)
@triton.jit
def triton_poi_fused_div_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
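# Note: the division by self.sqrt_hd is compiled into a multiply by 0.5, which
# is exact here because n_hid == 4 and 1 / math.sqrt(4) == 0.5.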
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](buf0, primals_2, buf2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf3 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0)
del buf0
extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
        triton_poi_fused_div_1[grid(256)](buf4, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0)
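# Note: the batched matmul(tx, ty.transpose(0, 1)) on (4, 4, 4, 4) inputs is
# lowered to a single bmm over 16 4x4 matrices (buf1 x buf2 above), and the
# final 1 / sqrt(n_hid) scale is applied by triton_poi_fused_div_1.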
class MatcherNew(nn.Module):
"""
Matching between a pair of nodes to conduct link prediction.
    Uses multi-head attention as the matching model.
"""
def __init__(self, n_hid):
super(MatcherNew, self).__init__()
self.left_linear = nn.Linear(n_hid, n_hid)
self.right_linear = nn.Linear(n_hid, n_hid)
self.sqrt_hd = math.sqrt(n_hid)
        self.n_hid = n_hid  # recorded for __repr__
        self.cache = None
def __repr__(self):
return '{}(n_hid={})'.format(self.__class__.__name__, self.n_hid)
def forward(self, input_0, input_1):
primals_1 = self.left_linear.weight
primals_2 = self.left_linear.bias
primals_4 = self.right_linear.weight
primals_5 = self.right_linear.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| nchungvh/pyhgt | Matcher | false | 7,319 | [
"MIT"
] | 1 | 3cb08ea856ca02aaf1664aa7486024a8742c7567 | https://github.com/nchungvh/pyhgt/tree/3cb08ea856ca02aaf1664aa7486024a8742c7567 | import math
import torch
import torch.nn as nn
class Model(nn.Module):
"""
Matching between a pair of nodes to conduct link prediction.
    Uses multi-head attention as the matching model.
"""
def __init__(self, n_hid):
super().__init__()
self.left_linear = nn.Linear(n_hid, n_hid)
self.right_linear = nn.Linear(n_hid, n_hid)
self.sqrt_hd = math.sqrt(n_hid)
        self.n_hid = n_hid  # recorded for __repr__
        self.cache = None
def forward(self, x, y, infer=False, pair=False):
ty = self.right_linear(y)
if infer:
"""
            During testing, we will consider millions or even billions of nodes as candidates (x).
            It is not feasible to recompute them for each different query (y).
            Since the model is fixed, we propose to cache them and directly reuse the results.
"""
if self.cache is not None:
tx = self.cache
else:
tx = self.left_linear(x)
self.cache = tx
else:
tx = self.left_linear(x)
if pair:
res = (tx * ty).sum(dim=-1)
else:
res = torch.matmul(tx, ty.transpose(0, 1))
return res / self.sqrt_hd
def __repr__(self):
return '{}(n_hid={})'.format(self.__class__.__name__, self.n_hid)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Q | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ff/cffi7vxidma5gei4f6wznc3qzapljmsv5w6dvkcys2pj7dzl4a37.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (50, 4), (4, 1))
assert_size_stride(primals_2, (50, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (50, 50), (50, 1))
assert_size_stride(primals_5, (50, ), (1, ))
assert_size_stride(primals_6, (4, 50), (50, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 50), (50, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 50), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 50), (800, 200, 50, 1), 0); del buf0 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 3200, grid=grid(3200), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 50), (50, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 50), (50, 1), 0), reinterpret_tensor(primals_4, (50, 50), (1, 50), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 50), (800, 200, 50, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf5, 3200, grid=grid(3200), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 50), (50, 1), 0), reinterpret_tensor(primals_6, (50, 4), (1, 50), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 50), (50, 1), 0), reinterpret_tensor(buf3, (64, 50), (50, 1), 0), primals_6, buf5, primals_4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((50, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((50, 50), (50, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 50), (50, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Q(nn.Module):
"""
    Simple fully connected Q function. Also used for skip-Q when the behaviour action and state are concatenated.
Used for simpler environments such as mountain-car or lunar-lander.
"""
def __init__(self, state_dim, action_dim, non_linearity=F.relu,
hidden_dim=50):
super(Q, self).__init__()
self.fc1 = nn.Linear(state_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, hidden_dim)
self.fc3 = nn.Linear(hidden_dim, action_dim)
self._non_linearity = non_linearity
def forward(self, x):
x = self._non_linearity(self.fc1(x))
x = self._non_linearity(self.fc2(x))
return self.fc3(x)
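# Hedged sketch (illustrative helper, not part of the original module): one
# forward pass producing a Q-value per action for a batch of states.
def _demo_q_values(batch=32):
    q = Q(state_dim=4, action_dim=4)
    states = torch.rand(batch, 4)
    return q(states)  # (batch, 4): one value per action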
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 3200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
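# Note: this kernel fuses the linear bias add with ReLU and also stores the
# (activation <= 0) mask that aten.threshold_backward consumes when autograd
# replays the graph.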
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (50, 4), (4, 1))
assert_size_stride(primals_2, (50,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (50, 50), (50, 1))
assert_size_stride(primals_5, (50,), (1,))
assert_size_stride(primals_6, (4, 50), (50, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 50), (50, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 50), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 50), (800, 200, 50, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(3200)](buf1,
primals_2, buf6, 3200, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 50), (50, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 50), (50, 1), 0),
reinterpret_tensor(primals_4, (50, 50), (1, 50), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 50), (800, 200, 50, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(3200)](buf3,
primals_5, buf5, 3200, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 50),
(50, 1), 0), reinterpret_tensor(primals_6, (50, 4), (1, 50), 0),
alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 50), (50, 1), 0), reinterpret_tensor(
buf3, (64, 50), (50, 1), 0), primals_6, buf5, primals_4, buf6
class QNew(nn.Module):
"""
    Simple fully connected Q function. Also used for skip-Q when the behaviour action and state are concatenated.
Used for simpler environments such as mountain-car or lunar-lander.
"""
def __init__(self, state_dim, action_dim, non_linearity=F.relu,
hidden_dim=50):
super(QNew, self).__init__()
self.fc1 = nn.Linear(state_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, hidden_dim)
self.fc3 = nn.Linear(hidden_dim, action_dim)
self._non_linearity = non_linearity
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| ndangtt/LeadingOnesDAC | Q | false | 7,320 | [
"Apache-2.0"
] | 1 | 953747d8702f179851d7973c65779a1f830e03a1 | https://github.com/ndangtt/LeadingOnesDAC/tree/953747d8702f179851d7973c65779a1f830e03a1 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""
    Simple fully connected Q function. Also used for skip-Q when the behaviour action and state are concatenated.
Used for simpler environments such as mountain-car or lunar-lander.
"""
def __init__(self, state_dim, action_dim, non_linearity=F.relu,
hidden_dim=50):
super().__init__()
self.fc1 = nn.Linear(state_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, hidden_dim)
self.fc3 = nn.Linear(hidden_dim, action_dim)
self._non_linearity = non_linearity
def forward(self, x):
x = self._non_linearity(self.fc1(x))
x = self._non_linearity(self.fc2(x))
return self.fc3(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
DropoutModel8x8 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/bt/cbtjbmr5m5c4mhbqxveibnhvbqjvrlxo2hhzizsqqqjncnkataks.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 123008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3844) % 8
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
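# Note: this and the next two conv+relu kernels are the same fused pattern at
# different sizes; each adds the per-channel conv bias (recovered from the
# flat offset via x1) and applies ReLU in place on the convolution output.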
# kernel path: runs/run_shard_4/inductor_cache/v7/cv7gcfw662u7ngkptjdt6rpvhxgnpbp6chl34unrai3vjfdilka7.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3600) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gl/cgl6ves5ty3jwlbi6fjlzhdf377vzaqjlefere5arwdb454puopk.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 430592
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3364) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/bj/cbjbrcq2c4lnhq6lfkwbnisuiukkuysklcohvckvnqhyf2svdo6b.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_3 => convolution_3
# Graph fragment:
# %convolution_3 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 430592
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3364) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/od/code5feml4gavu33w47mbgt2wqi3jihgm7zvlbtlb5ps56qnbbff.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_4 => convolution_4
# Graph fragment:
# %convolution_4 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_3, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3600) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/g5/cg5lsjamxvv2aeqai5phnpjdf52tngyrfukzk3ljx5djh7pp66rl.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_5 => convolution_5
# Graph fragment:
# %convolution_5 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_4, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 123008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3844) % 8
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/6s/c6swfjx2l4pjvp3jilcida2kynci64icvero76ct3j2eygyj7uzn.py
# Topologically Sorted Source Nodes: [conv_transpose2d_2, x_6], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# conv_transpose2d_2 => convolution_6
# x_6 => sigmoid
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_5, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_6,), kwargs = {})
triton_poi_fused_convolution_sigmoid_6 = async_compile.triton('triton_poi_fused_convolution_sigmoid_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x3), tmp3, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (8, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (32, ), (1, ))
assert_size_stride(primals_10, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_11, (16, ), (1, ))
assert_size_stride(primals_12, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_13, (8, ), (1, ))
assert_size_stride(primals_14, (8, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_15, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 62, 62), (30752, 3844, 62, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 123008, grid=grid(123008), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 60, 60), (57600, 3600, 60, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 230400, grid=grid(230400), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 58, 58), (107648, 3364, 58, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_7, 430592, grid=grid(430592), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 58, 58), (107648, 3364, 58, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf7, primals_9, 430592, grid=grid(430592), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 16, 60, 60), (57600, 3600, 60, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
triton_poi_fused_convolution_4.run(buf9, primals_11, 230400, grid=grid(230400), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 8, 62, 62), (30752, 3844, 62, 1))
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution]
triton_poi_fused_convolution_5.run(buf11, primals_13, 123008, grid=grid(123008), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [conv_transpose2d_2], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d_2, x_6], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_6.run(buf13, primals_15, 65536, grid=grid(65536), stream=stream0)
del primals_15
return (buf13, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, buf1, buf3, buf5, buf7, buf9, buf11, buf13, )
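# Editor's note: the asserted shapes in `call` above follow from the shape
# rules for valid (padding=0) 3x3 convolutions and their transposed
# counterparts: a forward conv maps H -> H - 2, a transposed conv maps
# H -> H + 2, and the padded middle conv preserves H. A small sketch of that
# arithmetic, added for illustration (`_trace_spatial_sizes` is a
# hypothetical helper, never called here):
def _trace_spatial_sizes(h=64):
    for _ in range(3):
        h = h - 3 + 1  # valid 3x3 convs: 64 -> 62 -> 60 -> 58
    # conv_middle uses kernel 3 with padding=1, so h stays at 58
    for _ in range(3):
        h = h + 3 - 1  # transposed 3x3 convs: 58 -> 60 -> 62 -> 64
    return h  # 64, matching the (4, 4, 64, 64) output buffer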
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((32, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((16, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((8, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as func
class DropoutModel8x8(nn.Module):
def __init__(self, channel):
"""
Define useful layers
Argument:
        channel: number of channels (depth), i.e. the number of distinct sprite types
"""
super(DropoutModel8x8, self).__init__()
self.dropout_1 = nn.Dropout2d(0.3)
self.conv_1 = nn.Conv2d(channel, channel * 2, kernel_size=3, stride=1)
self.conv_2 = nn.Conv2d(channel * 2, channel * 4, kernel_size=3,
stride=1)
self.conv_3 = nn.Conv2d(channel * 4, channel * 8, kernel_size=3,
stride=1)
        self.conv_middle = nn.Conv2d(channel * 8, channel * 8,
            kernel_size=3, stride=1, padding=1)
self.conv_T1 = nn.ConvTranspose2d(channel * 8, channel * 4,
kernel_size=3, stride=1)
self.conv_T2 = nn.ConvTranspose2d(channel * 4, channel * 2,
kernel_size=3, stride=1)
        self.conv_T3 = nn.ConvTranspose2d(channel * 2, channel,
            kernel_size=3, stride=1)
def forward(self, x):
if self.training:
x = self.dropout_1(x)
x = func.relu(self.conv_1(x))
x = func.relu(self.conv_2(x))
x = func.relu(self.conv_3(x))
x = self.conv_middle(x)
x = self.conv_T1(x)
x = self.conv_T2(x)
x = torch.sigmoid(self.conv_T3(x))
return x
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'channel': 4}]
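# Editor's note: an added usage sketch, not part of the original file
# (`_smoke_test` is a hypothetical name). In eval mode the Dropout2d layer is
# skipped, and the transposed convolutions restore the 58x58 bottleneck back
# to the 64x64 input resolution.
def _smoke_test():
    model = DropoutModel8x8(channel=4)
    model.eval()  # disable dropout for a deterministic forward pass
    out = model(torch.rand(4, 4, 64, 64))
    assert out.shape == (4, 4, 64, 64)  # sigmoid keeps values in [0, 1]
    return out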
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 123008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 8
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3600 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 430592
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3364 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 430592
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3364 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3600 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 123008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 8
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_sigmoid_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (8, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_11, (16,), (1,))
assert_size_stride(primals_12, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_13, (8,), (1,))
assert_size_stride(primals_14, (8, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_15, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 62, 62), (30752, 3844, 62, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(123008)](buf1, primals_2,
123008, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 60, 60), (57600, 3600, 60, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(230400)](buf3, primals_5,
230400, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 58, 58), (107648, 3364, 58, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(430592)](buf5, primals_7,
430592, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 58, 58), (107648, 3364, 58, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_3[grid(430592)](buf7, primals_9,
430592, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_9
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 16, 60, 60), (57600, 3600, 60, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_4[grid(230400)](buf9, primals_11,
230400, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 8, 62, 62), (30752, 3844, 62, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_5[grid(123008)](buf11, primals_13,
123008, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_sigmoid_6[grid(65536)](buf13,
primals_15, 65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_15
return (buf13, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, buf1, buf3, buf5, buf7, buf9,
buf11, buf13)
class DropoutModel8x8New(nn.Module):
def __init__(self, channel):
"""
Define useful layers
Argument:
        channel: number of channels (depth), i.e. the number of distinct sprite types
"""
super(DropoutModel8x8New, self).__init__()
self.dropout_1 = nn.Dropout2d(0.3)
self.conv_1 = nn.Conv2d(channel, channel * 2, kernel_size=3, stride=1)
self.conv_2 = nn.Conv2d(channel * 2, channel * 4, kernel_size=3,
stride=1)
self.conv_3 = nn.Conv2d(channel * 4, channel * 8, kernel_size=3,
stride=1)
        self.conv_middle = nn.Conv2d(channel * 8, channel * 8,
            kernel_size=3, stride=1, padding=1)
self.conv_T1 = nn.ConvTranspose2d(channel * 8, channel * 4,
kernel_size=3, stride=1)
self.conv_T2 = nn.ConvTranspose2d(channel * 4, channel * 2,
kernel_size=3, stride=1)
        self.conv_T3 = nn.ConvTranspose2d(channel * 2, channel,
            kernel_size=3, stride=1)
def forward(self, input_0):
primals_1 = self.conv_1.weight
primals_2 = self.conv_1.bias
primals_4 = self.conv_2.weight
primals_5 = self.conv_2.bias
primals_6 = self.conv_3.weight
primals_7 = self.conv_3.bias
primals_8 = self.conv_middle.weight
primals_9 = self.conv_middle.bias
primals_10 = self.conv_T1.weight
primals_11 = self.conv_T1.bias
primals_12 = self.conv_T2.weight
primals_13 = self.conv_T2.bias
primals_14 = self.conv_T3.weight
primals_15 = self.conv_T3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
| mwxely/Cross-domain-PCGML-Level-Generator | DropoutModel8x8 | false | 7,321 | [
"MIT"
] | 1 | baa5d214d6cf22272d144aa6c444a778ac202afe | https://github.com/mwxely/Cross-domain-PCGML-Level-Generator/tree/baa5d214d6cf22272d144aa6c444a778ac202afe | import torch
import torch.nn as nn
import torch.nn.functional as func
class Model(nn.Module):
def __init__(self, channel):
"""
Define useful layers
Argument:
        channel: number of channels (depth), i.e. the number of distinct sprite types
"""
super().__init__()
self.dropout_1 = nn.Dropout2d(0.3)
self.conv_1 = nn.Conv2d(channel, channel * 2, kernel_size=3, stride=1)
self.conv_2 = nn.Conv2d(channel * 2, channel * 4, kernel_size=3,
stride=1)
self.conv_3 = nn.Conv2d(channel * 4, channel * 8, kernel_size=3,
stride=1)
        self.conv_middle = nn.Conv2d(channel * 8, channel * 8,
            kernel_size=3, stride=1, padding=1)
self.conv_T1 = nn.ConvTranspose2d(channel * 8, channel * 4,
kernel_size=3, stride=1)
self.conv_T2 = nn.ConvTranspose2d(channel * 4, channel * 2,
kernel_size=3, stride=1)
        self.conv_T3 = nn.ConvTranspose2d(channel * 2, channel,
            kernel_size=3, stride=1)
def forward(self, x):
if self.training:
x = self.dropout_1(x)
x = func.relu(self.conv_1(x))
x = func.relu(self.conv_2(x))
x = func.relu(self.conv_3(x))
x = self.conv_middle(x)
x = self.conv_T1(x)
x = self.conv_T2(x)
x = torch.sigmoid(self.conv_T3(x))
return x
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [4]
|
Attn | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/bl/cblmokvcpsr2ttllzsqpn7e5if5ssmadzarqlyj626zemyxwynho.py
# Topologically Sorted Source Nodes: [repeat], Original ATen: [aten.repeat]
# Source node to ATen node mapping:
# repeat => repeat
# Graph fragment:
# %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%unsqueeze, [4, 1, 1]), kwargs = {})
triton_poi_fused_repeat_0 = async_compile.triton('triton_poi_fused_repeat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/el/celbwovrpqdj7xcqvgqem6nca5pczte5x37qb6upvds3ndwqwm5d.py
# Topologically Sorted Source Nodes: [relu, attentions], Original ATen: [aten.relu, aten._softmax, aten.threshold_backward]
# Source node to ATen node mapping:
# attentions => amax, exp, sub
# relu => relu
# Graph fragment:
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%relu, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused__softmax_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused__softmax_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_relu_threshold_backward_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp6 = triton_helpers.maximum(tmp1, tmp5)
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = triton_helpers.maximum(tmp1, tmp8)
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = triton_helpers.maximum(tmp1, tmp11)
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tmp16 = 0.0
tmp17 = tmp2 <= tmp16
tl.store(out_ptr0 + (x2), tmp15, xmask)
tl.store(out_ptr1 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/k6/ck6fz3qsfeqgn5jtm4ugikmu7cwvvlq3jpttijbb5kdniicwtyz6.py
# Topologically Sorted Source Nodes: [attentions], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attentions => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
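# Editor's note: the two kernels above implement a numerically stable softmax
# over each row of 4 scores in two passes: the first applies ReLU, subtracts
# the row max, and exponentiates (softmax(x) = exp(x - max(x)) /
# sum(exp(x - max(x)))); the second divides by the row sum. A hedged
# eager-mode reference, added for illustration only (`_reference_relu_softmax`
# is not part of the generated file):
def _reference_relu_softmax(scores):
    z = torch.relu(scores)
    z = torch.exp(z - z.max(dim=-1, keepdim=True).values)  # pass 1
    return z / z.sum(dim=-1, keepdim=True)                 # pass 2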
# kernel path: runs/run_shard_4/inductor_cache/pa/cpatphiiryly7si3mmav4yhq6he4gwiz2bx745alhtxbbb5643hi.py
# Topologically Sorted Source Nodes: [weighted_input], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# weighted_input => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %expand), kwargs = {})
triton_poi_fused_mul_3 = async_compile.triton('triton_poi_fused_mul_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [repeat], Original ATen: [aten.repeat]
stream0 = get_raw_stream(0)
triton_poi_fused_repeat_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [repeat, weights], Original ATen: [aten.repeat, aten.bmm]
extern_kernels.bmm(primals_1, buf0, out=buf1)
buf2 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0); del buf0 # reuse
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu, attentions], Original ATen: [aten.relu, aten._softmax, aten.threshold_backward]
triton_poi_fused__softmax_relu_threshold_backward_1.run(buf1, buf2, buf5, 16, grid=grid(16), stream=stream0)
buf3 = reinterpret_tensor(buf1, (4, 4), (4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [attentions], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf2, buf3, 16, grid=grid(16), stream=stream0)
del buf2
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weighted_input], Original ATen: [aten.mul]
triton_poi_fused_mul_3.run(primals_1, buf3, buf4, 64, grid=grid(64), stream=stream0)
return (buf4, buf3, primals_1, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
from numpy import sqrt
class Attn(nn.Module):
def __init__(self, hidden_size, batch_first=True):
super(Attn, self).__init__()
self.hidden_size = hidden_size
self.batch_first = batch_first
self.weights = nn.Parameter(torch.Tensor(hidden_size, 1))
stdv = 1.0 / sqrt(self.hidden_size)
for weight in self.weights:
nn.init.uniform_(weight, -stdv, stdv)
def forward(self, x):
if self.batch_first:
batch_size, _seq_size = x.size()[:2]
else:
_seq_size, batch_size = x.size()[:2]
weights = torch.bmm(x, self.weights.unsqueeze(0).repeat(batch_size,
1, 1))
attentions = torch.softmax(F.relu(weights.squeeze()), dim=-1)
weighted_input = torch.mul(x, attentions.unsqueeze(-1).expand_as(x))
return weighted_input, attentions
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
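# Editor's note: an added sketch, not part of the original file, showing the
# same attention computation with broadcasting instead of the explicit
# repeat/bmm (`attn_reference` is a hypothetical name used for illustration):
def attn_reference(x, weights):
    # x: (batch, seq, hidden); weights: (hidden, 1)
    scores = torch.relu((x @ weights).squeeze(-1))  # (batch, seq)
    attentions = torch.softmax(scores, dim=-1)      # each row sums to 1
    weighted_input = x * attentions.unsqueeze(-1)   # broadcast over hidden
    return weighted_input, attentions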
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from numpy import sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_relu_threshold_backward_1(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp6 = triton_helpers.maximum(tmp1, tmp5)
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = triton_helpers.maximum(tmp1, tmp8)
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = triton_helpers.maximum(tmp1, tmp11)
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tmp16 = 0.0
tmp17 = tmp2 <= tmp16
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_repeat_0[grid(16)](primals_2, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(primals_1, buf0, out=buf1)
buf2 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused__softmax_relu_threshold_backward_1[grid(16)](buf1,
buf2, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4), (4, 1), 0)
del buf1
triton_poi_fused__softmax_2[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf2
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_mul_3[grid(64)](primals_1, buf3, buf4, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
return buf4, buf3, primals_1, buf3, buf5
class AttnNew(nn.Module):
def __init__(self, hidden_size, batch_first=True):
super(AttnNew, self).__init__()
self.hidden_size = hidden_size
self.batch_first = batch_first
self.weights = nn.Parameter(torch.Tensor(hidden_size, 1))
stdv = 1.0 / sqrt(self.hidden_size)
for weight in self.weights:
nn.init.uniform_(weight, -stdv, stdv)
def forward(self, input_0):
primals_2 = self.weights
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0], output[1]
| nauhc/biLSTM-many-to-one | Attn | false | 7,322 | [
"MIT"
] | 1 | 14dab1c75b395c88bdddfe751461af7dc30e1166 | https://github.com/nauhc/biLSTM-many-to-one/tree/14dab1c75b395c88bdddfe751461af7dc30e1166 | import torch
import torch.nn as nn
import torch.nn.functional as F
from numpy import sqrt
class Model(nn.Module):
def __init__(self, hidden_size, batch_first=True):
super().__init__()
self.hidden_size = hidden_size
self.batch_first = batch_first
self.weights = nn.Parameter(torch.Tensor(hidden_size, 1))
stdv = 1.0 / sqrt(self.hidden_size)
for weight in self.weights:
nn.init.uniform_(weight, -stdv, stdv)
def forward(self, x):
if self.batch_first:
batch_size, _seq_size = x.size()[:2]
else:
_seq_size, batch_size = x.size()[:2]
weights = torch.bmm(x, self.weights.unsqueeze(0).repeat(batch_size,
1, 1))
attentions = torch.softmax(F.relu(weights.squeeze()), dim=-1)
weighted_input = torch.mul(x, attentions.unsqueeze(-1).expand_as(x))
return weighted_input, attentions
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
VAE | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3q/c3qwr2d2rrpjzvnddomnmdy6cwva4hjlvrn2y5epemk4ak3k2m6c.py
# Topologically Sorted Source Nodes: [h1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# h1 => relu
# Graph fragment:
# %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_3), kwargs = {})
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 400
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/jd/cjdtedosfkoutmd76tpyaejxhpwspa7takf5bagemxu5kt4jquxx.py
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 784
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (400, 784), (784, 1))
assert_size_stride(primals_3, (400, ), (1, ))
assert_size_stride(primals_4, (20, 400), (400, 1))
assert_size_stride(primals_5, (20, ), (1, ))
assert_size_stride(primals_6, (20, 400), (400, 1))
assert_size_stride(primals_7, (20, ), (1, ))
assert_size_stride(primals_8, (400, 20), (20, 1))
assert_size_stride(primals_9, (400, ), (1, ))
assert_size_stride(primals_10, (784, 400), (400, 1))
assert_size_stride(primals_11, (784, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 400), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [h1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 1600, grid=grid(1600), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
# Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (400, 20), (1, 400), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
# Topologically Sorted Source Nodes: [logvar], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf1, reinterpret_tensor(primals_6, (400, 20), (1, 400), 0), alpha=1, beta=1, out=buf3)
del primals_7
buf4 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_8, (20, 400), (1, 20), 0), out=buf4)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [h3], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf5, primals_9, 1600, grid=grid(1600), stream=stream0)
del primals_9
buf6 = empty_strided_cuda((4, 784), (784, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf5, reinterpret_tensor(primals_10, (400, 784), (1, 400), 0), out=buf6)
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_1.run(buf7, primals_11, 3136, grid=grid(3136), stream=stream0)
del primals_11
return (buf7, buf2, buf3, primals_1, buf1, buf2, buf5, buf7, primals_10, primals_8, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 784), (784, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((400, 784), (784, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((20, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((20, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((400, 20), (20, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((784, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((784, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class VAE(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(784, 400)
self.fc21 = nn.Linear(400, 20)
self.fc22 = nn.Linear(400, 20)
self.fc3 = nn.Linear(20, 400)
self.fc4 = nn.Linear(400, 784)
def encode(self, x):
h1 = F.relu(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def reparameterize(self, mu, logvar):
if self.training:
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
else:
return mu
def decode(self, z):
h3 = F.relu(self.fc3(z))
return F.sigmoid(self.fc4(h3))
def forward(self, x):
mu, logvar = self.encode(x.view(-1, 784))
z = self.reparameterize(mu, logvar)
return self.decode(z), mu, logvar
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return [[], {}]
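# Editor's note: an added sketch of the reparameterization trick used in
# `reparameterize` above (illustrative only). Sampling z ~ N(mu, sigma^2)
# directly is not differentiable w.r.t. mu and logvar, so the model samples
# eps ~ N(0, I) and computes z = mu + eps * sigma instead; with
# logvar = log(sigma^2), sigma = exp(0.5 * logvar).
def reparameterize_reference(mu, logvar):
    std = torch.exp(0.5 * logvar)  # sigma = exp(log(sigma^2) / 2)
    eps = torch.randn_like(std)    # eps ~ N(0, I)
    return mu + eps * std          # differentiable sample from N(mu, sigma^2)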
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 400
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 3136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 784
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (400, 784), (784, 1))
assert_size_stride(primals_3, (400,), (1,))
assert_size_stride(primals_4, (20, 400), (400, 1))
assert_size_stride(primals_5, (20,), (1,))
assert_size_stride(primals_6, (20, 400), (400, 1))
assert_size_stride(primals_7, (20,), (1,))
assert_size_stride(primals_8, (400, 20), (20, 1))
assert_size_stride(primals_9, (400,), (1,))
assert_size_stride(primals_10, (784, 400), (400, 1))
assert_size_stride(primals_11, (784,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784,
400), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
        triton_poi_fused_relu_0[grid(1600)](buf1, primals_3, 1600,
            XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(400, 20), (1, 400), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
extern_kernels.addmm(primals_7, buf1, reinterpret_tensor(primals_6,
(400, 20), (1, 400), 0), alpha=1, beta=1, out=buf3)
del primals_7
buf4 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_8, (20, 400), (1,
20), 0), out=buf4)
buf5 = buf4
del buf4
        triton_poi_fused_relu_0[grid(1600)](buf5, primals_9, 1600,
            XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf6 = empty_strided_cuda((4, 784), (784, 1), torch.float32)
extern_kernels.mm(buf5, reinterpret_tensor(primals_10, (400, 784),
(1, 400), 0), out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_sigmoid_1[grid(3136)](buf7, primals_11, 3136,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_11
return (buf7, buf2, buf3, primals_1, buf1, buf2, buf5, buf7, primals_10,
primals_8, primals_6, primals_4)
class VAENew(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(784, 400)
self.fc21 = nn.Linear(400, 20)
self.fc22 = nn.Linear(400, 20)
self.fc3 = nn.Linear(20, 400)
self.fc4 = nn.Linear(400, 784)
def encode(self, x):
h1 = F.relu(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def reparameterize(self, mu, logvar):
if self.training:
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
else:
return mu
def decode(self, z):
h3 = F.relu(self.fc3(z))
return F.sigmoid(self.fc4(h3))
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc21.weight
primals_5 = self.fc21.bias
primals_6 = self.fc22.weight
primals_7 = self.fc22.bias
primals_8 = self.fc3.weight
primals_9 = self.fc3.bias
primals_10 = self.fc4.weight
primals_11 = self.fc4.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1], output[2]
| nd1511/argus | VAE | false | 7,323 | [
"MIT"
] | 1 | 00aaed41ac1321d669ac7060f4d21b24cc3456f0 | https://github.com/nd1511/argus/tree/00aaed41ac1321d669ac7060f4d21b24cc3456f0 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(784, 400)
self.fc21 = nn.Linear(400, 20)
self.fc22 = nn.Linear(400, 20)
self.fc3 = nn.Linear(20, 400)
self.fc4 = nn.Linear(400, 784)
def encode(self, x):
h1 = F.relu(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def reparameterize(self, mu, logvar):
if self.training:
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
else:
return mu
def decode(self, z):
h3 = F.relu(self.fc3(z))
return F.sigmoid(self.fc4(h3))
def forward(self, x):
mu, logvar = self.encode(x.view(-1, 784))
z = self.reparameterize(mu, logvar)
return self.decode(z), mu, logvar
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return []
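# Editor's note: an added sketch of the loss commonly paired with this model
# (the standard VAE ELBO; it does not appear in the original file). The
# reconstruction term matches the sigmoid output, and the KL term against a
# unit-Gaussian prior has the closed form
# -0.5 * sum(1 + logvar - mu^2 - exp(logvar)).
def vae_loss_reference(recon_x, x, mu, logvar):
    bce = F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction='sum')
    kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return bce + kld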
|
GCN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/5r/c5rqrqkujbmkvgeup36fkdfjs6rct2qovadf2yme6qovehmb7klh.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.add, aten.relu]
# Source node to ATen node mapping:
# x => add
# x_1 => relu
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_1, %primals_4), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
triton_poi_fused_add_relu_0 = async_compile.triton('triton_poi_fused_add_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ul/culvxc5xcnacfjypzxghwcyc2445sqsz25ci4rib6axjxs3fv3so.py
# Topologically Sorted Source Nodes: [pred], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# pred => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_default, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_default, %amax), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/yr/cyr6fatjcqc5np3quy6arljtkkff4qjmueyb5b4pk5xvkxgrzuvd.py
# Topologically Sorted Source Nodes: [pred], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# pred => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 16), (16, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (16, ), (1, ))
assert_size_stride(primals_5, (16, 16), (16, 1))
assert_size_stride(primals_6, (16, ), (1, ))
assert_size_stride(primals_7, (16, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [support], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.mm]
extern_kernels.mm(primals_3, buf0, out=buf1)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.add, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_add_relu_0.run(buf2, primals_4, 64, grid=grid(64), stream=stream0)
del primals_4
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [support_1], Original ATen: [aten.mm]
extern_kernels.mm(buf2, primals_5, out=buf3)
buf4 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.mm]
extern_kernels.mm(primals_3, buf3, out=buf4)
del buf3
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.add, aten.relu]
triton_poi_fused_add_relu_0.run(buf5, primals_6, 64, grid=grid(64), stream=stream0)
del primals_6
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [support_2], Original ATen: [aten.mm]
extern_kernels.mm(buf5, primals_7, out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.addmm(primals_8, primals_3, buf6, alpha=1, beta=1, out=buf7)
del primals_8
buf8 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [pred], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_1.run(buf7, buf8, 16, grid=grid(16), stream=stream0)
buf9 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [pred], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_2.run(buf8, buf9, 16, grid=grid(16), stream=stream0)
del buf8
return (buf9, buf2, buf5, buf9, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), reinterpret_tensor(primals_7, (4, 16), (1, 4), 0), reinterpret_tensor(primals_5, (16, 16), (1, 16), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), )
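    # Added note (illustrative): besides the log-softmax output (buf9), call()
    # returns the ReLU activations (buf2, buf5) and transposed weight views;
    # these are the tensors the backward pass of this compiled graph reuses.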
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import math
import torch
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import torch.nn as nn
import torch.nn.functional as F
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, topology, bsize, i, n_class, bias=True):
super(GraphConvolution, self).__init__()
if i == 0:
self.in_features = in_features
else:
self.in_features = topology[i - 1] * bsize
if i == len(topology):
self.out_features = n_class
else:
self.out_features = topology[i] * bsize
self.weight = Parameter(torch.FloatTensor(self.in_features, self.
out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(self.out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GCN(nn.Module):
def __init__(self, infeat, bsize, topology, n_class, dropout):
super(GCN, self).__init__()
self.num_layers = len(topology)
self.layers = nn.ModuleDict({'gc{}'.format(i): GraphConvolution(
infeat, topology, bsize, i, n_class) for i in range(self.
num_layers)})
self.outlayer = GraphConvolution(infeat, topology, bsize, self.
num_layers, n_class)
self.dropout = dropout
def forward(self, x, adj, ls=False):
for i in range(self.num_layers):
x = self.layers['gc' + str(i)](x, adj)
x = F.relu(x)
if i == 0:
x = F.dropout(x, self.dropout, training=self.training)
if ls:
pred = x
else:
x = self.outlayer(x, adj)
pred = F.log_softmax(x, dim=1)
return pred
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'infeat': 4, 'bsize': 4, 'topology': [4, 4], 'n_class': 4,
'dropout': 0.5}]
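def _gcn_layer_sketch():
    # Hedged sketch (illustrative only, not from the repo): one hidden layer
    # of GCN.forward above computes relu(adj @ (x @ W) + b); with a dense
    # identity adjacency this reduces to an ordinary linear layer.
    x = torch.rand(4, 4)        # node features, one row per node
    adj = torch.eye(4)          # adjacency (identity: no neighbours)
    w = torch.rand(4, 16)
    b = torch.zeros(16)
    return torch.relu(adj @ (x @ w) + b)   # shape (4, 16)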
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
import math
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
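# Added note (illustrative): the kernel above fuses the bias add and ReLU of
# one GraphConvolution layer in place; x0 = xindex % 16 broadcasts the
# 16-wide bias across the (4, 16) output.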
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
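def _log_softmax_reference(x):
    # Added reference (not generated by Inductor): the two kernels above
    # implement the numerically stable row-wise identity
    #   log_softmax(x)_i = (x_i - max(x)) - log(sum_j exp(x_j - max(x))),
    # split into a max-subtract pass and a log-sum-exp pass.
    shifted = x - x.max(dim=1, keepdim=True).values
    return shifted - shifted.exp().sum(dim=1, keepdim=True).log()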
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 16), (16, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (16,), (1,))
assert_size_stride(primals_5, (16, 16), (16, 1))
assert_size_stride(primals_6, (16,), (1,))
assert_size_stride(primals_7, (16, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.mm(primals_3, buf0, out=buf1)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_relu_0[grid(64)](buf2, primals_4, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del primals_4
buf3 = buf0
del buf0
extern_kernels.mm(buf2, primals_5, out=buf3)
buf4 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.mm(primals_3, buf3, out=buf4)
del buf3
buf5 = buf4
del buf4
triton_poi_fused_add_relu_0[grid(64)](buf5, primals_6, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del primals_6
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf5, primals_7, out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, primals_3, buf6, alpha=1, beta=1,
out=buf7)
del primals_8
buf8 = buf6
del buf6
triton_poi_fused__log_softmax_1[grid(16)](buf7, buf8, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf9 = buf7
del buf7
triton_poi_fused__log_softmax_2[grid(16)](buf8, buf9, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf8
return buf9, buf2, buf5, buf9, reinterpret_tensor(primals_3, (4, 4), (1,
4), 0), reinterpret_tensor(primals_7, (4, 16), (1, 4), 0
), reinterpret_tensor(primals_5, (16, 16), (1, 16), 0
), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0)
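# Added note (illustrative): Inductor reuses dead buffers in call() above
# (e.g. buf3 takes over buf0's storage, buf8 reuses buf6), so the compiled
# graph allocates fewer intermediates than a one-buffer-per-op translation.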
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, topology, bsize, i, n_class, bias=True):
super(GraphConvolution, self).__init__()
if i == 0:
self.in_features = in_features
else:
self.in_features = topology[i - 1] * bsize
if i == len(topology):
self.out_features = n_class
else:
self.out_features = topology[i] * bsize
self.weight = Parameter(torch.FloatTensor(self.in_features, self.
out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(self.out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GCNNew(nn.Module):
def __init__(self, infeat, bsize, topology, n_class, dropout):
super(GCNNew, self).__init__()
self.num_layers = len(topology)
self.layers = nn.ModuleDict({'gc{}'.format(i): GraphConvolution(
infeat, topology, bsize, i, n_class) for i in range(self.
num_layers)})
self.outlayer = GraphConvolution(infeat, topology, bsize, self.
num_layers, n_class)
self.dropout = dropout
def forward(self, input_0, input_1):
primals_1 = self.layers.gc0.weight
primals_4 = self.layers.gc0.bias
primals_5 = self.layers.gc1.weight
primals_6 = self.layers.gc1.bias
primals_7 = self.outlayer.weight
primals_8 = self.outlayer.bias
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| negarhdr/PGCN | GCN | false | 7,324 | [
"MIT"
] | 1 | 5143049afcfadc5ab0173e6083ebbb4fd8c8903d | https://github.com/negarhdr/PGCN/tree/5143049afcfadc5ab0173e6083ebbb4fd8c8903d | from torch.nn import Module
import math
import torch
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import torch.nn as nn
import torch.nn.functional as F
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, topology, bsize, i, n_class, bias=True):
super().__init__()
if i == 0:
self.in_features = in_features
else:
self.in_features = topology[i - 1] * bsize
if i == len(topology):
self.out_features = n_class
else:
self.out_features = topology[i] * bsize
self.weight = Parameter(torch.FloatTensor(self.in_features, self.
out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(self.out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class Model(nn.Module):
def __init__(self, infeat, bsize, topology, n_class, dropout):
super().__init__()
self.num_layers = len(topology)
self.layers = nn.ModuleDict({'gc{}'.format(i): GraphConvolution(
infeat, topology, bsize, i, n_class) for i in range(self.
num_layers)})
self.outlayer = GraphConvolution(infeat, topology, bsize, self.
num_layers, n_class)
self.dropout = dropout
def forward(self, x, adj, ls=False):
for i in range(self.num_layers):
x = self.layers['gc' + str(i)](x, adj)
x = F.relu(x)
if i == 0:
x = F.dropout(x, self.dropout, training=self.training)
if ls:
pred = x
else:
x = self.outlayer(x, adj)
pred = F.log_softmax(x, dim=1)
return pred
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'infeat': 4, 'bsize': 4, 'topology': [4, 4], 'n_class': 4,
'dropout': 0.5}]
|
Perceptron | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/zi/czi6taqk3yywywfl3iwbejutxysbxi6hrg6s2rrrevzoemnmagnw.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_6, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/6h/c6hgrncbhy7kjladlqflhqnw52mciqxt6qj53hxyw2giskevmcnl.py
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view]
# Source node to ATen node mapping:
# linear_1 => view_7
# Graph fragment:
# %view_7 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_6, [64, 4]), kwargs = {})
triton_poi_fused_view_1 = async_compile.triton('triton_poi_fused_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*((x1 % 4) // 4)) + (64*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/uu/cuuzthplaeal57d4a7qc3d5glgcrm6drww2436xzjpxvtfwoxeym.py
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_8,), kwargs = {})
triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view]
triton_poi_fused_view_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_2.run(buf4, 256, grid=grid(256), stream=stream0)
return (buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, buf4, primals_4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Perceptron(nn.Module):
"""Implements a 1-layer perceptron."""
def __init__(self, input_dimension, hidden_dimension, output_dimension):
super(Perceptron, self).__init__()
self._layer1 = nn.Linear(input_dimension, hidden_dimension)
self._layer2 = nn.Linear(hidden_dimension, output_dimension, bias=False
)
def forward(self, inp):
return F.sigmoid(self._layer2(F.relu(self._layer1(inp), inplace=True)))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dimension': 4, 'hidden_dimension': 4,
'output_dimension': 4}]
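def _perceptron_reference(inp, layer1, layer2):
    # Hedged reference (illustrative, not from the repo): Perceptron.forward
    # above is sigmoid(relu(x @ W1^T + b1) @ W2^T), applied over the last
    # dimension, so any leading batch shape is preserved.
    hidden = F.relu(inp @ layer1.weight.t() + layer1.bias)
    return torch.sigmoid(hidden @ layer2.weight.t())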
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + x4, tmp6, xmask)
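# Added note (illustrative): besides the in-place ReLU, the kernel above also
# writes the boolean mask (activation <= 0) to out_ptr0; autograd later
# consumes it as the threshold_backward input instead of recomputing it.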
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 *
(x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
del buf1
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused_sigmoid_2[grid(256)](buf4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2, buf4, primals_4, buf5
class PerceptronNew(nn.Module):
"""Implements a 1-layer perceptron."""
def __init__(self, input_dimension, hidden_dimension, output_dimension):
super(PerceptronNew, self).__init__()
self._layer1 = nn.Linear(input_dimension, hidden_dimension)
self._layer2 = nn.Linear(hidden_dimension, output_dimension, bias=False
)
def forward(self, input_0):
primals_1 = self._layer1.weight
primals_2 = self._layer1.bias
primals_4 = self._layer2.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| negotiatorvivian/SAT-Solver | Perceptron | false | 7,325 | [
"MIT"
] | 1 | acbf375ce73103e945aee3e2a225126684a19076 | https://github.com/negotiatorvivian/SAT-Solver/tree/acbf375ce73103e945aee3e2a225126684a19076 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""Implements a 1-layer perceptron."""
def __init__(self, input_dimension, hidden_dimension, output_dimension):
super().__init__()
self._layer1 = nn.Linear(input_dimension, hidden_dimension)
self._layer2 = nn.Linear(hidden_dimension, output_dimension, bias=False
)
def forward(self, inp):
return F.sigmoid(self._layer2(F.relu(self._layer1(inp), inplace=True)))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dimension': 4, 'hidden_dimension': 4,
'output_dimension': 4}]
|
PerceptronTanh | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/zi/czi6taqk3yywywfl3iwbejutxysbxi6hrg6s2rrrevzoemnmagnw.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_6, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/6h/c6hgrncbhy7kjladlqflhqnw52mciqxt6qj53hxyw2giskevmcnl.py
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view]
# Source node to ATen node mapping:
# linear_1 => view_7
# Graph fragment:
# %view_7 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_6, [64, 4]), kwargs = {})
triton_poi_fused_view_1 = async_compile.triton('triton_poi_fused_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*((x1 % 4) // 4)) + (64*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/go/cgopseyzzv3w63b5jv7s6dhjcmlplinmoz74mtsgq6oaberlqmbt.py
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# tanh => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_8,), kwargs = {})
triton_poi_fused_tanh_2 = async_compile.triton('triton_poi_fused_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_2(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view]
triton_poi_fused_view_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
triton_poi_fused_tanh_2.run(buf4, 256, grid=grid(256), stream=stream0)
return (buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, buf4, primals_4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class PerceptronTanh(nn.Module):
"""Implements a 1-layer perceptron with Tanh activaton."""
def __init__(self, input_dimension, hidden_dimension, output_dimension):
super(PerceptronTanh, self).__init__()
self._layer1 = nn.Linear(input_dimension, hidden_dimension)
self._layer2 = nn.Linear(hidden_dimension, output_dimension, bias=False
)
def forward(self, inp):
return F.tanh(self._layer2(F.relu(self._layer1(inp), inplace=True)))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dimension': 4, 'hidden_dimension': 4,
'output_dimension': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 *
(x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_tanh_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
del buf1
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused_tanh_2[grid(256)](buf4, 256, XBLOCK=128, num_warps
=4, num_stages=1)
return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2, buf4, primals_4, buf5
class PerceptronTanhNew(nn.Module):
"""Implements a 1-layer perceptron with Tanh activaton."""
def __init__(self, input_dimension, hidden_dimension, output_dimension):
super(PerceptronTanhNew, self).__init__()
self._layer1 = nn.Linear(input_dimension, hidden_dimension)
self._layer2 = nn.Linear(hidden_dimension, output_dimension, bias=False
)
def forward(self, input_0):
primals_1 = self._layer1.weight
primals_2 = self._layer1.bias
primals_4 = self._layer2.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| negotiatorvivian/SAT-Solver | PerceptronTanh | false | 7,326 | [
"MIT"
] | 1 | acbf375ce73103e945aee3e2a225126684a19076 | https://github.com/negotiatorvivian/SAT-Solver/tree/acbf375ce73103e945aee3e2a225126684a19076 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""Implements a 1-layer perceptron with Tanh activaton."""
def __init__(self, input_dimension, hidden_dimension, output_dimension):
super().__init__()
self._layer1 = nn.Linear(input_dimension, hidden_dimension)
self._layer2 = nn.Linear(hidden_dimension, output_dimension, bias=False
)
def forward(self, inp):
return F.tanh(self._layer2(F.relu(self._layer1(inp), inplace=True)))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dimension': 4, 'hidden_dimension': 4,
'output_dimension': 4}]
|
ConfidentMSELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/od/codxjpk4dmqwyksxoe2mwv4ftwthsnntp4epjjoghk3xsimbr6ha.py
# Topologically Sorted Source Nodes: [sub, diff, diff_conf, loss], Original ATen: [aten.sub, aten.pow, aten.mul, aten.mean]
# Source node to ATen node mapping:
# diff => pow_1
# diff_conf => mul
# loss => mean
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %view_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %view_2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul,), kwargs = {})
triton_per_fused_mean_mul_pow_sub_0 = async_compile.triton('triton_per_fused_mean_mul_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = 0.96
tmp5 = tmp1 > tmp4
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp3 * tmp6
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, diff, diff_conf, loss], Original ATen: [aten.sub, aten.pow, aten.mul, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_mul_pow_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
class ConfidentMSELoss(Module):
def __init__(self, threshold=0.96):
super().__init__()
self.threshold = threshold
def forward(self, input, target):
n = input.size(0)
conf_mask = torch.gt(target, self.threshold).float()
input_flat = input.view(n, -1)
target_flat = target.view(n, -1)
conf_mask_flat = conf_mask.view(n, -1)
diff = (input_flat - target_flat) ** 2
diff_conf = diff * conf_mask_flat
loss = diff_conf.mean()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
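# Illustrative check (an addition, not from the original record): the module is
# a thresholded, masked MSE, so it reduces to one line of tensor ops.
_inp, _tgt = get_inputs()
_ref = ((_inp - _tgt) ** 2 * (_tgt > 0.96).float()).mean()
assert torch.allclose(ConfidentMSELoss()(_inp, _tgt), _ref)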
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
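# Added note: this fused kernel computes mean((x - y)**2 * (y > 0.96)) in a
# single reduction pass; the 0.96 threshold is baked in as a constant.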
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = 0.96
tmp5 = tmp1 > tmp4
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp3 * tmp6
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_mul_pow_sub_0[grid(1)](buf1, arg0_1, arg1_1,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class ConfidentMSELossNew(Module):
def __init__(self, threshold=0.96):
super().__init__()
self.threshold = threshold
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| neuropoly/medicaltorch | ConfidentMSELoss | false | 7,327 | [
"Apache-2.0"
] | 1 | ac129fe894cb1906285dfe380ba4f0aa3bdec787 | https://github.com/neuropoly/medicaltorch/tree/ac129fe894cb1906285dfe380ba4f0aa3bdec787 | from torch.nn import Module
import torch
class Model(Module):
def __init__(self, threshold=0.96):
super().__init__()
self.threshold = threshold
def forward(self, input, target):
n = input.size(0)
conf_mask = torch.gt(target, self.threshold).float()
input_flat = input.view(n, -1)
target_flat = target.view(n, -1)
conf_mask_flat = conf_mask.view(n, -1)
diff = (input_flat - target_flat) ** 2
diff_conf = diff * conf_mask_flat
loss = diff_conf.mean()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Conv2 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/iu/ciuxern2omgit5ovksuiwlddxkww6e3pkid4q2h3sauzn5rbd35z.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [2], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/l7/cl73m2z7ubizl4gjzahoztnfbxiinsybshrc4sjlnb7hovne23sz.py
# Topologically Sorted Source Nodes: [sigmoid, tanh, mul], Original ATen: [aten.sigmoid, aten.tanh, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# sigmoid => sigmoid
# tanh => tanh
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem,), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh), kwargs = {})
triton_poi_fused_mul_sigmoid_tanh_1 = async_compile.triton('triton_poi_fused_mul_sigmoid_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (8*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + (8*x1)), xmask)
tmp6 = tl.load(in_ptr0 + (4 + x0 + (8*x1)), xmask)
tmp7 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (4 + x0 + (8*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = libdevice.tanh(tmp10)
tmp12 = tmp5 * tmp11
tl.store(out_ptr0 + (x2), tmp5, xmask)
tl.store(out_ptr1 + (x2), tmp11, xmask)
tl.store(out_ptr2 + (x2), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (8, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (8, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (8, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(2,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 8, 1), (8, 1, 1))
del buf0
buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(primals_4, reinterpret_tensor(primals_5, (4, 8), (1, 4), 0), out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, tanh, mul], Original ATen: [aten.sigmoid, aten.tanh, aten.mul]
triton_poi_fused_mul_sigmoid_tanh_1.run(buf1, primals_3, buf2, buf3, buf4, buf5, 16, grid=grid(16), stream=stream0)
del buf1
del buf2
del primals_3
return (buf5, primals_2, primals_4, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), buf3, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
class Conv2(nn.Module):
""" A convolution layer with the stride of 2.
Input:
x: (N, 2L+2, in_channels) numeric tensor
global_cond: (N, global_cond_channels) numeric tensor
Output:
y: (N, L, out_channels) numeric tensor
"""
def __init__(self, in_channels, out_channels, global_cond_channels):
super().__init__()
ksz = 4
self.out_channels = out_channels
if 0 < global_cond_channels:
self.w_cond = nn.Linear(global_cond_channels, 2 * out_channels, bias=False)
self.conv_wide = nn.Conv1d(in_channels, 2 * out_channels, ksz, stride=2)
wsize = 2.967 / math.sqrt(ksz * in_channels)
self.conv_wide.weight.data.uniform_(-wsize, wsize)
self.conv_wide.bias.data.zero_()
def forward(self, x, global_cond):
x1 = self.conv_wide(x.transpose(1, 2)).transpose(1, 2)
if global_cond is not None:
x2 = self.w_cond(global_cond).unsqueeze(1).expand(-1, x1.size(1), -1)
else:
x2 = torch.zeros_like(x1)
a, b = (x1 + x2).split(self.out_channels, dim=2)
return torch.sigmoid(a) * torch.tanh(b)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4,
'global_cond_channels': 4}]
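# Shape sketch (an addition): ksz=4 with stride 2 maps an input of length
# 2L+2 to L frames, and the 2*out_channels conv output is split into a
# sigmoid gate times a tanh branch (a GLU-style gated activation).
_m = Conv2(**get_init_inputs()[1])
_x, _cond = get_inputs()  # _x: (4, 4, 4), so 2L+2 = 4 and L = 1
assert _m(_x, _cond).shape == (4, 1, 4)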
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
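# Added note: materializes x.transpose(1, 2), i.e. (N, L, C) -> (N, C, L),
# so the channels-first layout expected by Conv1d is contiguous.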
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_mul_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
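# Added note: the 8-channel conv output is split in half along channels;
# sigmoid(first_half + bias + cond) gates tanh(second_half + bias + cond).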
tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 8 * x1), xmask)
tmp6 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask)
tmp7 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (4 + x0 + 8 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = libdevice.tanh(tmp10)
tmp12 = tmp5 * tmp11
tl.store(out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr1 + x2, tmp11, xmask)
tl.store(out_ptr2 + x2, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (8, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (8,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (8, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(2,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 8, 1), (8, 1, 1))
del buf0
buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(primals_4, reinterpret_tensor(primals_5, (4, 8),
(1, 4), 0), out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_tanh_1[grid(16)](buf1, primals_3, buf2,
buf3, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf1
del buf2
del primals_3
return buf5, primals_2, primals_4, reinterpret_tensor(primals_1, (4, 4,
4), (16, 1, 4), 0), buf3, buf4
class Conv2New(nn.Module):
""" A convolution layer with the stride of 2.
Input:
x: (N, 2L+2, in_channels) numeric tensor
global_cond: (N, global_cond_channels) numeric tensor
Output:
y: (N, L, out_channels) numeric tensor
"""
def __init__(self, in_channels, out_channels, global_cond_channels):
super().__init__()
ksz = 4
self.out_channels = out_channels
if 0 < global_cond_channels:
self.w_cond = nn.Linear(global_cond_channels, 2 * out_channels, bias=False)
self.conv_wide = nn.Conv1d(in_channels, 2 * out_channels, ksz, stride=2)
wsize = 2.967 / math.sqrt(ksz * in_channels)
self.conv_wide.weight.data.uniform_(-wsize, wsize)
self.conv_wide.bias.data.zero_()
def forward(self, input_0, input_1):
primals_5 = self.w_cond.weight
primals_2 = self.conv_wide.weight
primals_3 = self.conv_wide.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| neverix/voice-conv | Conv2 | false | 7,328 | [
"MIT"
] | 1 | 6df0053a59aa26318bdbc096dd312ecc55596ac0 | https://github.com/neverix/voice-conv/tree/6df0053a59aa26318bdbc096dd312ecc55596ac0 | import math
import torch
import torch.nn as nn
class Model(nn.Module):
""" A convolution layer with the stride of 2.
Input:
x: (N, 2L+2, in_channels) numeric tensor
global_cond: (N, global_cond_channels) numeric tensor
Output:
y: (N, L, out_channels) numeric tensor
"""
def __init__(self, in_channels, out_channels, global_cond_channels):
super().__init__()
ksz = 4
self.out_channels = out_channels
if 0 < global_cond_channels:
self.w_cond = nn.Linear(global_cond_channels, 2 * out_channels, bias=False)
self.conv_wide = nn.Conv1d(in_channels, 2 * out_channels, ksz, stride=2)
wsize = 2.967 / math.sqrt(ksz * in_channels)
self.conv_wide.weight.data.uniform_(-wsize, wsize)
self.conv_wide.bias.data.zero_()
def forward(self, x, global_cond):
x1 = self.conv_wide(x.transpose(1, 2)).transpose(1, 2)
if global_cond is not None:
x2 = self.w_cond(global_cond).unsqueeze(1).expand(-1, x1.size(1), -1)
else:
x2 = torch.zeros_like(x1)
a, b = (x1 + x2).split(self.out_channels, dim=2)
return torch.sigmoid(a) * torch.tanh(b)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4,
'global_cond_channels': 4}]
|
Attention | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/r6/cr6neze6yovkog6kjrk5k2db63h47ozkojywfys6karxe7dlumrz.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(arg0_1, reinterpret_tensor(arg1_1, (4, 4, 4), (16, 1, 4), 0), out=buf0)
del arg0_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [attn_out], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), arg1_1, out=buf3)
del arg1_1
return (buf3, reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
"""
Applies an attention mechanism on the query features from the decoder.
.. math::
\\begin{array}{ll}
x = context*query \\\\
attn_scores = exp(x_i) / sum_j exp(x_j) \\\\
attn_out = attn * context
\\end{array}
Args:
dim(int): The number of expected features in the query
Inputs: query, context
- **query** (batch, query_len, dimensions): tensor containing the query features from the decoder.
- **context** (batch, input_len, dimensions): tensor containing features of the encoded input sequence.
Outputs: query, attn
- **query** (batch, query_len, dimensions): tensor containing the attended query features from the decoder.
- **attn** (batch, query_len, input_len): tensor containing attention weights.
Attributes:
mask (torch.Tensor, optional): applies a :math:`-inf` to the indices specified in the `Tensor`.
"""
def __init__(self):
super(Attention, self).__init__()
self.mask = None
def set_mask(self, mask):
"""
Sets indices to be masked
Args:
mask (torch.Tensor): tensor containing indices to be masked
"""
self.mask = mask
"""
- query (batch, query_len, dimensions): tensor containing the query features from the decoder.
- context (batch, input_len, dimensions): tensor containing features of the encoded input sequence.
"""
def forward(self, query, context):
batch_size = query.size(0)
query.size(2)  # kept from the original source; the returned size is unused
in_len = context.size(1)
attn = torch.bmm(query, context.transpose(1, 2))
if self.mask is not None:
attn.data.masked_fill_(self.mask, -float('inf'))
attn_scores = F.softmax(attn.view(-1, in_len), dim=1).view(batch_size,
-1, in_len)
attn_out = torch.bmm(attn_scores, context)
return attn_out, attn_scores
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
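# Worked shapes (an addition): scores are normalized over input_len, so every
# attention row sums to 1 and attn_out is a convex mix of context rows.
_q, _ctx = get_inputs()  # both (batch=4, len=4, dim=4)
_out, _scores = Attention()(_q, _ctx)
assert _out.shape == (4, 4, 4) and _scores.shape == (4, 4, 4)
assert torch.allclose(_scores.sum(-1), torch.ones(4, 4))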
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
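# Added note: softmax pass 1 -- subtract the per-row max over the 4 columns
# before exponentiating, for numerical stability.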
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
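# Added note: softmax pass 2 -- normalize each exponential by its row sum.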
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg0_1, reinterpret_tensor(arg1_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1),
0), arg1_1, out=buf3)
del arg1_1
return buf3, reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0)
class AttentionNew(nn.Module):
"""
Applies an attention mechanism on the query features from the decoder.
.. math::
\\begin{array}{ll}
x = context*query \\\\
attn_scores = exp(x_i) / sum_j exp(x_j) \\\\
attn_out = attn * context
\\end{array}
Args:
dim(int): The number of expected features in the query
Inputs: query, context
- **query** (batch, query_len, dimensions): tensor containing the query features from the decoder.
- **context** (batch, input_len, dimensions): tensor containing features of the encoded input sequence.
Outputs: query, attn
- **query** (batch, query_len, dimensions): tensor containing the attended query features from the decoder.
- **attn** (batch, query_len, input_len): tensor containing attention weights.
Attributes:
mask (torch.Tensor, optional): applies a :math:`-inf` to the indices specified in the `Tensor`.
"""
def __init__(self):
super(AttentionNew, self).__init__()
self.mask = None
def set_mask(self, mask):
"""
Sets indices to be masked
Args:
mask (torch.Tensor): tensor containing indices to be masked
"""
self.mask = mask
"""
- query (batch, query_len, dimensions): tensor containing the query features from the decoder.
- context (batch, input_len, dimensions): tensor containing features of the encoded input sequence.
"""
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
| nguyenxuanhoi2903/SRSF_summarization | Attention | false | 7,329 | [
"MIT"
] | 1 | 3d19e6b7669e0b22bab533fc637a434f379ed392 | https://github.com/nguyenxuanhoi2903/SRSF_summarization/tree/3d19e6b7669e0b22bab533fc637a434f379ed392 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""
Applies an attention mechanism on the query features from the decoder.
.. math::
\\begin{array}{ll}
x = context*query \\\\
attn_scores = exp(x_i) / sum_j exp(x_j) \\\\
attn_out = attn * context
\\end{array}
Args:
dim(int): The number of expected features in the query
Inputs: query, context
- **query** (batch, query_len, dimensions): tensor containing the query features from the decoder.
- **context** (batch, input_len, dimensions): tensor containing features of the encoded input sequence.
Outputs: query, attn
- **query** (batch, query_len, dimensions): tensor containing the attended query features from the decoder.
- **attn** (batch, query_len, input_len): tensor containing attention weights.
Attributes:
mask (torch.Tensor, optional): applies a :math:`-inf` to the indices specified in the `Tensor`.
"""
def __init__(self):
super().__init__()
self.mask = None
def set_mask(self, mask):
"""
Sets indices to be masked
Args:
mask (torch.Tensor): tensor containing indices to be masked
"""
self.mask = mask
"""
- query (batch, query_len, dimensions): tensor containing the query features from the decoder.
- context (batch, input_len, dimensions): tensor containing features of the encoded input sequence.
"""
def forward(self, query, context):
batch_size = query.size(0)
query.size(2)  # kept from the original source; the returned size is unused
in_len = context.size(1)
attn = torch.bmm(query, context.transpose(1, 2))
if self.mask is not None:
attn.data.masked_fill_(self.mask, -float('inf'))
attn_scores = F.softmax(attn.view(-1, in_len), dim=1).view(batch_size,
-1, in_len)
attn_out = torch.bmm(attn_scores, context)
return attn_out, attn_scores
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return []
|
MinusRbfHSIC | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/e7/ce7xjdj2tvxcwlhhngstv777hdifvqsxqnkbqoex2db5kmz3pccu.py
# Topologically Sorted Source Nodes: [Xn, X_1], Original ATen: [aten.linalg_vector_norm, aten.div]
# Source node to ATen node mapping:
# X_1 => div
# Xn => pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view, %pow_2), kwargs = {})
triton_per_fused_div_linalg_vector_norm_0 = async_compile.triton('triton_per_fused_div_linalg_vector_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_linalg_vector_norm_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
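# Added note: row-wise L2 normalization -- each flattened 64-dim sample is
# divided by its Euclidean norm before the Gram matrix X @ X.T is formed.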
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp7 = tmp0 / tmp6
tl.store(out_ptr1 + (r1 + (64*x0)), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gv/cgvqntvmbnygnswu42nmfoc5z5zjax6kxfpgxm6jtykcehwkrvi7.py
# Topologically Sorted Source Nodes: [mul, add, X_L2, X_L2_1, mean, mul_1, gamma, neg, mul_2, kernel_XX, diag_2, tK, sum_1], Original ATen: [aten.mul, aten.add, aten.clamp, aten.mean, aten.reciprocal, aten.neg, aten.exp, aten.diagonal_copy, aten.sub, aten.sum]
# Source node to ATen node mapping:
# X_L2 => add_1
# X_L2_1 => clamp_min
# add => add
# diag_2 => diagonal_copy_2
# gamma => mul_2, reciprocal
# kernel_XX => exp
# mean => mean
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_3
# neg => neg
# sum_1 => sum_4
# tK => sub
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm, -2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %unsqueeze), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %unsqueeze_1), kwargs = {})
# %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_1, 1e-12), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%clamp_min,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 2), kwargs = {})
# %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%mul_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mul_2,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %clamp_min), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_3,), kwargs = {})
# %diagonal_copy_2 : [num_users=1] = call_function[target=torch.ops.aten.diagonal_copy.default](args = (%exp,), kwargs = {})
# %sub : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%exp, %diagonal_copy_2), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub,), kwargs = {})
triton_per_fused_add_clamp_diagonal_copy_exp_mean_mul_neg_reciprocal_sub_sum_1 = async_compile.triton('triton_per_fused_add_clamp_diagonal_copy_exp_mean_mul_neg_reciprocal_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_diagonal_copy_exp_mean_mul_neg_reciprocal_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_diagonal_copy_exp_mean_mul_neg_reciprocal_sub_sum_1(in_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r1 = (rindex // 4)
r0 = rindex % 4
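# Added note: reconstructs squared distances d_ij = -2*K_ij + K_ii + K_jj
# from the Gram matrix, clamps at 1e-12, applies the Gaussian kernel
# exp(-d / (2 * mean(d))), and subtracts the diagonal (tK = K - diag K).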
tmp0 = tl.load(in_ptr0 + (r2), None)
tmp3 = tl.load(in_ptr0 + (5*r1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (5*r0), None, eviction_policy='evict_last')
tmp1 = -2.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1e-12
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = 16.0
tmp13 = tmp11 / tmp12
tmp14 = 2.0
tmp15 = tmp13 * tmp14
tmp16 = tl.full([1, 1], 1, tl.int32)
tmp17 = tmp16 / tmp15
tmp18 = 1.0
tmp19 = tmp17 * tmp18
tmp20 = -tmp19
tmp21 = tmp20 * tmp8
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp5 * tmp1
tmp24 = tmp23 + tmp5
tmp25 = tmp24 + tmp5
tmp26 = triton_helpers.maximum(tmp25, tmp7)
tmp27 = tmp20 * tmp26
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp22 - tmp28
tmp30 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK])
tmp32 = tl.sum(tmp30, 1)[:, None]
tl.store(out_ptr1 + (tl.broadcast_to(r2, [XBLOCK, RBLOCK])), tmp29, None)
tl.store(out_ptr2 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp32, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/sq/csq4ts4ymmp3skpjo5ndeq3xb7zhrbverearsk22gshhezg3xbgj.py
# Topologically Sorted Source Nodes: [trace, mul_6, truediv_2, truediv_3, add_4, sum_3, sum_4, dot, mul_7, truediv_4, hsic, truediv_5, neg_2], Original ATen: [aten.trace, aten.mul, aten.div, aten.add, aten.sum, aten.dot, aten.sub, aten.neg]
# Source node to ATen node mapping:
# add_4 => add_4
# dot => mul_9, sum_8
# hsic => sub_2
# mul_6 => mul_8
# mul_7 => mul_10
# neg_2 => neg_2
# sum_3 => sum_6
# sum_4 => sum_7
# trace => diagonal_copy_4, sum_3
# truediv_2 => div_2
# truediv_3 => div_3
# truediv_4 => div_4
# truediv_5 => div_5
# Graph fragment:
# %diagonal_copy_4 : [num_users=1] = call_function[target=torch.ops.aten.diagonal_copy.default](args = (%mm_2,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%diagonal_copy_4,), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_4, %sum_5), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_8, 3), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_2, 2), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_3, %div_3), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub, [0]), kwargs = {})
# %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub_1, [0]), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_6, %sum_7), kwargs = {})
# %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_9,), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_8, 2), kwargs = {})
# %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_10, 2), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %div_4), kwargs = {})
# %div_5 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, 4), kwargs = {})
# %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%div_5,), kwargs = {})
triton_per_fused_add_div_dot_mul_neg_sub_sum_trace_2 = async_compile.triton('triton_per_fused_add_div_dot_mul_neg_sub_sum_trace_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {6: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=(6,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_dot_mul_neg_sub_sum_trace_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_dot_mul_neg_sub_sum_trace_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
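# Added note: assembles the unbiased estimator
# [tr(tK tL) + (1'tK1)(1'tL1)/((m-1)(m-2)) - (2/(m-2)) 1'tK tL 1] / (m(m-3))
# for m = 4, then negates it (MinusRbfHSIC returns -HSIC).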
tmp0 = tl.load(in_ptr0 + (5*r0), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (r0), None)
tmp5 = tl.load(in_ptr1 + (4 + r0), None)
tmp7 = tl.load(in_ptr1 + (8 + r0), None)
tmp9 = tl.load(in_ptr1 + (12 + r0), None)
tmp11 = tl.load(in_ptr2 + (r0), None)
tmp12 = tl.load(in_ptr2 + (4 + r0), None)
tmp14 = tl.load(in_ptr2 + (8 + r0), None)
tmp16 = tl.load(in_ptr2 + (12 + r0), None)
tmp22 = tl.load(in_ptr3 + (0))
tmp23 = tl.broadcast_to(tmp22, [XBLOCK, 1])
tmp24 = tl.load(in_ptr4 + (0))
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, 1])
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp17 = tmp15 + tmp16
tmp18 = tmp10 * tmp17
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp21 = tl.sum(tmp19, 1)[:, None]
tmp26 = tmp23 * tmp25
tmp27 = 0.3333333333333333
tmp28 = tmp26 * tmp27
tmp29 = 0.5
tmp30 = tmp28 * tmp29
tmp31 = tmp3 + tmp30
tmp32 = 2.0
tmp33 = tmp21 * tmp32
tmp34 = tmp33 * tmp29
tmp35 = tmp31 - tmp34
tmp36 = 0.25
tmp37 = tmp35 * tmp36
tmp38 = -tmp37
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp38, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [Xn, X_1], Original ATen: [aten.linalg_vector_norm, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_div_linalg_vector_norm_0.run(arg0_1, buf1, 4, 64, grid=grid(4), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [XX], Original ATen: [aten.mm]
extern_kernels.mm(buf1, reinterpret_tensor(buf1, (64, 4), (1, 64), 0), out=buf2)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf12 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [mul, add, X_L2, X_L2_1, mean, mul_1, gamma, neg, mul_2, kernel_XX, diag_2, tK, sum_1], Original ATen: [aten.mul, aten.add, aten.clamp, aten.mean, aten.reciprocal, aten.neg, aten.exp, aten.diagonal_copy, aten.sub, aten.sum]
triton_per_fused_add_clamp_diagonal_copy_exp_mean_mul_neg_reciprocal_sub_sum_1.run(buf2, buf4, buf12, 1, 16, grid=grid(1), stream=stream0)
buf6 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [Xn_1, X_3], Original ATen: [aten.linalg_vector_norm, aten.div]
triton_per_fused_div_linalg_vector_norm_0.run(arg1_1, buf6, 4, 64, grid=grid(4), stream=stream0)
del arg1_1
buf7 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [XX_1], Original ATen: [aten.mm]
extern_kernels.mm(buf6, reinterpret_tensor(buf6, (64, 4), (1, 64), 0), out=buf7)
del buf6
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf13 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [mul_3, add_2, X_L2_2, X_L2_3, mean_1, mul_4, gamma_1, neg_1, mul_5, kernel_XX_1, diag_3, tL, sum_2], Original ATen: [aten.mul, aten.add, aten.clamp, aten.mean, aten.reciprocal, aten.neg, aten.exp, aten.diagonal_copy, aten.sub, aten.sum]
triton_per_fused_add_clamp_diagonal_copy_exp_mean_mul_neg_reciprocal_sub_sum_1.run(buf7, buf9, buf13, 1, 16, grid=grid(1), stream=stream0)
buf10 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.mm]
extern_kernels.mm(buf4, buf9, out=buf10)
buf11 = empty_strided_cuda((), (), torch.float32)
buf15 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [trace, mul_6, truediv_2, truediv_3, add_4, sum_3, sum_4, dot, mul_7, truediv_4, hsic, truediv_5, neg_2], Original ATen: [aten.trace, aten.mul, aten.div, aten.add, aten.sum, aten.dot, aten.sub, aten.neg]
triton_per_fused_add_div_dot_mul_neg_sub_sum_trace_2.run(buf15, buf10, buf4, buf9, buf12, buf13, 1, 4, grid=grid(1), stream=stream0)
del buf10
del buf12
del buf13
del buf4
del buf9
return (buf15, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class HSIC(nn.Module):
"""Base class for the finite sample estimator of Hilbert-Schmidt Independence Criterion (HSIC)
..math:: HSIC (X, Y) := || C_{x, y} ||^2_{HS}, where HSIC (X, Y) = 0 iff X and Y are independent.
Empirically, we use one of two finite sample estimators of HSIC (with m observations):
(1) biased estimator (HSIC_0)
Gretton, Arthur, et al. "Measuring statistical dependence with Hilbert-Schmidt norms." 2005.
:math: (m - 1)^{-2} tr KHLH.
where K_{ij} = kernel_x (x_i, x_j), L_{ij} = kernel_y (y_i, y_j), H = I - m^{-1} 1 1^\top (hence, K, L, H are m by m matrices).
(2) unbiased estimator (HSIC_1)
Song, Le, et al. "Feature selection via dependence maximization." 2012.
:math: \frac{1}{m (m - 3)} \bigg[ tr (\tilde K \tilde L) + \frac{1^\top \tilde K 1 1^\top \tilde L 1}{(m-1)(m-2)} - \frac{2}{m-2} 1^\top \tilde K \tilde L 1 \bigg].
where \tilde K and \tilde L are obtained from K and L by setting their diagonal entries to zero.
Parameters
----------
sigma_x : float
the kernel size of the kernel function for X.
sigma_y : float
the kernel size of the kernel function for Y.
algorithm: str ('unbiased' / 'biased')
the algorithm for the finite sample estimator. 'unbiased' is used for our paper.
reduction: not used (for compatibility with other losses).
"""
def __init__(self, sigma_x, sigma_y=None, algorithm='unbiased',
reduction=None):
super(HSIC, self).__init__()
if sigma_y is None:
sigma_y = sigma_x
self.sigma_x = sigma_x
self.sigma_y = sigma_y
if algorithm == 'biased':
self.estimator = self.biased_estimator
elif algorithm == 'unbiased':
self.estimator = self.unbiased_estimator
else:
raise ValueError('invalid estimator: {}'.format(algorithm))
def _kernel_x(self, X):
raise NotImplementedError
def _kernel_y(self, Y):
raise NotImplementedError
def biased_estimator(self, input1, input2):
"""Biased estimator of Hilbert-Schmidt Independence Criterion
Gretton, Arthur, et al. "Measuring statistical dependence with Hilbert-Schmidt norms." 2005.
"""
K = self._kernel_x(input1)
L = self._kernel_y(input2)
KH = K - K.mean(0, keepdim=True)
LH = L - L.mean(0, keepdim=True)
N = len(input1)
return torch.trace(KH @ LH / (N - 1) ** 2)
def unbiased_estimator(self, input1, input2):
"""Unbiased estimator of Hilbert-Schmidt Independence Criterion
Song, Le, et al. "Feature selection via dependence maximization." 2012.
"""
kernel_XX = self._kernel_x(input1)
kernel_YY = self._kernel_y(input2)
tK = kernel_XX - torch.diag(kernel_XX)
tL = kernel_YY - torch.diag(kernel_YY)
N = len(input1)
hsic = torch.trace(tK @ tL) + torch.sum(tK) * torch.sum(tL) / (N - 1
) / (N - 2) - 2 * torch.sum(tK, 0).dot(torch.sum(tL, 0)) / (N - 2)
return hsic / (N * (N - 3))
def forward(self, input1, input2, **kwargs):
return self.estimator(input1, input2)
class RbfHSIC(HSIC):
"""Radial Basis Function (RBF) kernel HSIC implementation.
"""
def _kernel(self, X, sigma):
X = X.view(len(X), -1)
Xn = X.norm(2, dim=1, keepdim=True)
X = X.div(Xn)
XX = X @ X.t()
X_sqnorms = torch.diag(XX)
X_L2 = -2 * XX + X_sqnorms.unsqueeze(1) + X_sqnorms.unsqueeze(0)
X_L2 = X_L2.clamp(1e-12)
sigma_avg = X_L2.mean().detach()
gamma = 1 / (2 * sigma_avg)
kernel_XX = torch.exp(-gamma * X_L2)
return kernel_XX
def _kernel_x(self, X):
return self._kernel(X, self.sigma_x)
def _kernel_y(self, Y):
return self._kernel(Y, self.sigma_y)
class MinusRbfHSIC(RbfHSIC):
"""``Minus'' RbfHSIC for the ``max'' optimization.
"""
def forward(self, input1, input2, **kwargs):
return -self.estimator(input1, input2)
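# Hedged usage sketch (not from the source repo; shapes and sigma are
# assumptions): HSIC is near zero for independent inputs and grows with
# dependence, and MinusRbfHSIC negates it so that minimizing the loss
# maximizes the measured dependence.
def _example_rbf_hsic():
    x = torch.rand(8, 16)  # 8 observations, 16 features each
    y = torch.rand(8, 16)
    return RbfHSIC(sigma_x=4)(x, y), MinusRbfHSIC(sigma_x=4)(x, y)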
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'sigma_x': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_div_linalg_vector_norm_0(in_ptr0, out_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
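    # L2-normalize each of the 4 flattened inputs: square, row-sum, sqrt,
    # then divide every element by its row norm (Xn in the PyTorch source).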
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp7 = tmp0 / tmp6
tl.store(out_ptr1 + (r1 + 64 * x0), tmp7, xmask)
@triton.jit
def triton_per_fused_add_clamp_diagonal_copy_exp_mean_mul_neg_reciprocal_sub_sum_1(
in_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r1 = rindex // 4
r0 = rindex % 4
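    # Fused RBF kernel: X_L2 = clamp(-2*XX + diag_row + diag_col, 1e-12),
    # gamma = 1 / (2 * mean(X_L2)), kernel = exp(-gamma * X_L2); the kernel's
    # diagonal vector is then broadcast-subtracted (tK = kernel - diag) and
    # the total sum of tK is written to out_ptr2.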
tmp0 = tl.load(in_ptr0 + r2, None)
tmp3 = tl.load(in_ptr0 + 5 * r1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + 5 * r0, None, eviction_policy='evict_last')
tmp1 = -2.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1e-12
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = 16.0
tmp13 = tmp11 / tmp12
tmp14 = 2.0
tmp15 = tmp13 * tmp14
tmp16 = tl.full([1, 1], 1, tl.int32)
tmp17 = tmp16 / tmp15
tmp18 = 1.0
tmp19 = tmp17 * tmp18
tmp20 = -tmp19
tmp21 = tmp20 * tmp8
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp5 * tmp1
tmp24 = tmp23 + tmp5
tmp25 = tmp24 + tmp5
tmp26 = triton_helpers.maximum(tmp25, tmp7)
tmp27 = tmp20 * tmp26
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp22 - tmp28
tmp30 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK])
tmp32 = tl.sum(tmp30, 1)[:, None]
tl.store(out_ptr1 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp29, None)
tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp32, None)
@triton.jit
def triton_per_fused_add_div_dot_mul_neg_sub_sum_trace_2(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, rnumel, XBLOCK: tl
.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 5 * r0, None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + r0, None)
tmp5 = tl.load(in_ptr1 + (4 + r0), None)
tmp7 = tl.load(in_ptr1 + (8 + r0), None)
tmp9 = tl.load(in_ptr1 + (12 + r0), None)
tmp11 = tl.load(in_ptr2 + r0, None)
tmp12 = tl.load(in_ptr2 + (4 + r0), None)
tmp14 = tl.load(in_ptr2 + (8 + r0), None)
tmp16 = tl.load(in_ptr2 + (12 + r0), None)
tmp22 = tl.load(in_ptr3 + 0)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK, 1])
tmp24 = tl.load(in_ptr4 + 0)
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, 1])
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp17 = tmp15 + tmp16
tmp18 = tmp10 * tmp17
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp21 = tl.sum(tmp19, 1)[:, None]
tmp26 = tmp23 * tmp25
tmp27 = 0.3333333333333333
tmp28 = tmp26 * tmp27
tmp29 = 0.5
tmp30 = tmp28 * tmp29
tmp31 = tmp3 + tmp30
tmp32 = 2.0
tmp33 = tmp21 * tmp32
tmp34 = tmp33 * tmp29
tmp35 = tmp31 - tmp34
tmp36 = 0.25
tmp37 = tmp35 * tmp36
tmp38 = -tmp37
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp38, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_div_linalg_vector_norm_0[grid(4)](arg0_1, buf1, 4,
64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(buf1, (64, 4), (1, 64),
0), out=buf2)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf12 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_add_clamp_diagonal_copy_exp_mean_mul_neg_reciprocal_sub_sum_1[
grid(1)](buf2, buf4, buf12, 1, 16, XBLOCK=1, num_warps=2,
num_stages=1)
buf6 = buf1
del buf1
triton_per_fused_div_linalg_vector_norm_0[grid(4)](arg1_1, buf6, 4,
64, XBLOCK=1, num_warps=2, num_stages=1)
del arg1_1
buf7 = buf2
del buf2
extern_kernels.mm(buf6, reinterpret_tensor(buf6, (64, 4), (1, 64),
0), out=buf7)
del buf6
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf13 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_add_clamp_diagonal_copy_exp_mean_mul_neg_reciprocal_sub_sum_1[
grid(1)](buf7, buf9, buf13, 1, 16, XBLOCK=1, num_warps=2,
num_stages=1)
buf10 = buf7
del buf7
extern_kernels.mm(buf4, buf9, out=buf10)
buf11 = empty_strided_cuda((), (), torch.float32)
buf15 = buf11
del buf11
triton_per_fused_add_div_dot_mul_neg_sub_sum_trace_2[grid(1)](buf15,
buf10, buf4, buf9, buf12, buf13, 1, 4, XBLOCK=1, num_warps=2,
num_stages=1)
del buf10
del buf12
del buf13
del buf4
del buf9
return buf15,
class HSIC(nn.Module):
"""Base class for the finite sample estimator of Hilbert-Schmidt Independence Criterion (HSIC)
..math:: HSIC (X, Y) := || C_{x, y} ||^2_{HS}, where HSIC (X, Y) = 0 iff X and Y are independent.
Empirically, we use one of two finite sample estimators of HSIC (with m observations):
(1) biased estimator (HSIC_0)
Gretton, Arthur, et al. "Measuring statistical dependence with Hilbert-Schmidt norms." 2005.
:math: (m - 1)^{-2} tr KHLH.
where K_{ij} = kernel_x (x_i, x_j), L_{ij} = kernel_y (y_i, y_j), H = I - m^{-1} 1 1^\top (hence, K, L, H are m by m matrices).
(2) unbiased estimator (HSIC_1)
Song, Le, et al. "Feature selection via dependence maximization." 2012.
:math: \frac{1}{m (m - 3)} \bigg[ tr (\tilde K \tilde L) + \frac{1^\top \tilde K 1 1^\top \tilde L 1}{(m-1)(m-2)} - \frac{2}{m-2} 1^\top \tilde K \tilde L 1 \bigg].
where \tilde K and \tilde L are obtained from K and L by setting their diagonal entries to zero.
Parameters
----------
sigma_x : float
the kernel size of the kernel function for X.
sigma_y : float
the kernel size of the kernel function for Y.
algorithm: str ('unbiased' / 'biased')
the algorithm for the finite sample estimator. 'unbiased' is used for our paper.
reduction: not used (for compatibility with other losses).
"""
def __init__(self, sigma_x, sigma_y=None, algorithm='unbiased',
reduction=None):
super(HSIC, self).__init__()
if sigma_y is None:
sigma_y = sigma_x
self.sigma_x = sigma_x
self.sigma_y = sigma_y
if algorithm == 'biased':
self.estimator = self.biased_estimator
elif algorithm == 'unbiased':
self.estimator = self.unbiased_estimator
else:
raise ValueError('invalid estimator: {}'.format(algorithm))
def _kernel_x(self, X):
raise NotImplementedError
def _kernel_y(self, Y):
raise NotImplementedError
def biased_estimator(self, input1, input2):
"""Biased estimator of Hilbert-Schmidt Independence Criterion
Gretton, Arthur, et al. "Measuring statistical dependence with Hilbert-Schmidt norms." 2005.
"""
K = self._kernel_x(input1)
L = self._kernel_y(input2)
KH = K - K.mean(0, keepdim=True)
LH = L - L.mean(0, keepdim=True)
N = len(input1)
return torch.trace(KH @ LH / (N - 1) ** 2)
def unbiased_estimator(self, input1, input2):
"""Unbiased estimator of Hilbert-Schmidt Independence Criterion
Song, Le, et al. "Feature selection via dependence maximization." 2012.
"""
kernel_XX = self._kernel_x(input1)
kernel_YY = self._kernel_y(input2)
tK = kernel_XX - torch.diag(kernel_XX)
tL = kernel_YY - torch.diag(kernel_YY)
N = len(input1)
hsic = torch.trace(tK @ tL) + torch.sum(tK) * torch.sum(tL) / (N - 1
) / (N - 2) - 2 * torch.sum(tK, 0).dot(torch.sum(tL, 0)) / (N - 2)
return hsic / (N * (N - 3))
def forward(self, input1, input2, **kwargs):
return self.estimator(input1, input2)
class RbfHSIC(HSIC):
"""Radial Basis Function (RBF) kernel HSIC implementation.
"""
def _kernel(self, X, sigma):
X = X.view(len(X), -1)
Xn = X.norm(2, dim=1, keepdim=True)
X = X.div(Xn)
XX = X @ X.t()
X_sqnorms = torch.diag(XX)
X_L2 = -2 * XX + X_sqnorms.unsqueeze(1) + X_sqnorms.unsqueeze(0)
X_L2 = X_L2.clamp(1e-12)
sigma_avg = X_L2.mean().detach()
gamma = 1 / (2 * sigma_avg)
kernel_XX = torch.exp(-gamma * X_L2)
return kernel_XX
def _kernel_x(self, X):
return self._kernel(X, self.sigma_x)
def _kernel_y(self, Y):
return self._kernel(Y, self.sigma_y)
class MinusRbfHSICNew(RbfHSIC):
"""``Minus'' RbfHSIC for the ``max'' optimization.
"""
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| naver-ai/cgl_fairness | MinusRbfHSIC | false | 7,330 | [
"MIT"
] | 1 | 00d3bec233c9b3e0f88496118abaed8321ca3159 | https://github.com/naver-ai/cgl_fairness/tree/00d3bec233c9b3e0f88496118abaed8321ca3159 | import torch
import torch.nn as nn
class HSIC(nn.Module):
"""Base class for the finite sample estimator of Hilbert-Schmidt Independence Criterion (HSIC)
..math:: HSIC (X, Y) := || C_{x, y} ||^2_{HS}, where HSIC (X, Y) = 0 iff X and Y are independent.
Empirically, we use one of two finite sample estimators of HSIC (with m observations):
(1) biased estimator (HSIC_0)
Gretton, Arthur, et al. "Measuring statistical dependence with Hilbert-Schmidt norms." 2005.
:math: (m - 1)^{-2} tr KHLH.
where K_{ij} = kernel_x (x_i, x_j), L_{ij} = kernel_y (y_i, y_j), H = I - m^{-1} 1 1^\top (hence, K, L, H are m by m matrices).
(2) unbiased estimator (HSIC_1)
Song, Le, et al. "Feature selection via dependence maximization." 2012.
:math: \frac{1}{m (m - 3)} \bigg[ tr (\tilde K \tilde L) + \frac{1^\top \tilde K 1 1^\top \tilde L 1}{(m-1)(m-2)} - \frac{2}{m-2} 1^\top \tilde K \tilde L 1 \bigg].
where \tilde K and \tilde L are obtained from K and L by setting their diagonal entries to zero.
Parameters
----------
sigma_x : float
the kernel size of the kernel function for X.
sigma_y : float
the kernel size of the kernel function for Y.
algorithm: str ('unbiased' / 'biased')
the algorithm for the finite sample estimator. 'unbiased' is used for our paper.
reduction: not used (for compatibility with other losses).
"""
def __init__(self, sigma_x, sigma_y=None, algorithm='unbiased',
reduction=None):
super().__init__()
if sigma_y is None:
sigma_y = sigma_x
self.sigma_x = sigma_x
self.sigma_y = sigma_y
if algorithm == 'biased':
self.estimator = self.biased_estimator
elif algorithm == 'unbiased':
self.estimator = self.unbiased_estimator
else:
raise ValueError('invalid estimator: {}'.format(algorithm))
def _kernel_x(self, X):
raise NotImplementedError
def _kernel_y(self, Y):
raise NotImplementedError
def biased_estimator(self, input1, input2):
"""Biased estimator of Hilbert-Schmidt Independence Criterion
Gretton, Arthur, et al. "Measuring statistical dependence with Hilbert-Schmidt norms." 2005.
"""
K = self._kernel_x(input1)
L = self._kernel_y(input2)
KH = K - K.mean(0, keepdim=True)
LH = L - L.mean(0, keepdim=True)
N = len(input1)
return torch.trace(KH @ LH / (N - 1) ** 2)
def unbiased_estimator(self, input1, input2):
"""Unbiased estimator of Hilbert-Schmidt Independence Criterion
Song, Le, et al. "Feature selection via dependence maximization." 2012.
"""
kernel_XX = self._kernel_x(input1)
kernel_YY = self._kernel_y(input2)
tK = kernel_XX - torch.diag(kernel_XX)
tL = kernel_YY - torch.diag(kernel_YY)
N = len(input1)
hsic = torch.trace(tK @ tL) + torch.sum(tK) * torch.sum(tL) / (N - 1
) / (N - 2) - 2 * torch.sum(tK, 0).dot(torch.sum(tL, 0)) / (N - 2)
return hsic / (N * (N - 3))
def forward(self, input1, input2, **kwargs):
return self.estimator(input1, input2)
class RbfHSIC(HSIC):
"""Radial Basis Function (RBF) kernel HSIC implementation.
"""
def _kernel(self, X, sigma):
X = X.view(len(X), -1)
Xn = X.norm(2, dim=1, keepdim=True)
X = X.div(Xn)
XX = X @ X.t()
X_sqnorms = torch.diag(XX)
X_L2 = -2 * XX + X_sqnorms.unsqueeze(1) + X_sqnorms.unsqueeze(0)
X_L2 = X_L2.clamp(1e-12)
sigma_avg = X_L2.mean().detach()
gamma = 1 / (2 * sigma_avg)
kernel_XX = torch.exp(-gamma * X_L2)
return kernel_XX
def _kernel_x(self, X):
return self._kernel(X, self.sigma_x)
def _kernel_y(self, Y):
return self._kernel(Y, self.sigma_y)
class Model(RbfHSIC):
"""``Minus'' RbfHSIC for the ``max'' optimization.
"""
# ... truncated (>4000 chars) for memory efficiency |
LayerNorm | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ol/colqxuikz5paponukbqdxclupy3w5be5gltp5osrvuaalfughrw4.py
# Topologically Sorted Source Nodes: [mean, sub_1, sub, pow_1, var, add, sqrt, truediv], Original ATen: [aten.mean, aten.sub, aten.pow, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add => add
# mean => mean
# pow_1 => pow_1
# sqrt => sqrt
# sub => sub
# sub_1 => sub_1
# truediv => div
# var => mean_1
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-05), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %sqrt), kwargs = {})
triton_poi_fused_add_div_mean_pow_sqrt_sub_0 = async_compile.triton('triton_poi_fused_add_div_mean_pow_sqrt_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_pow_sqrt_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_pow_sqrt_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp21 / tmp8
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp26 = tmp10 / tmp25
tl.store(out_ptr0 + (x2), tmp26, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, sub_1, sub, pow_1, var, add, sqrt, truediv], Original ATen: [aten.mean, aten.sub, aten.pow, aten.add, aten.sqrt, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_pow_sqrt_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class LayerNorm(torch.nn.Module):
"""
A vanilla implementation of layer normalization https://arxiv.org/pdf/1607.06450.pdf
norm_x = (x - mean) / sqrt(mean((x - mean) ^ 2) + 1e-5)
This does not include the trainable parameters gamma and beta, for speed.
Typically, this is norm_x * gamma + beta
"""
def forward(self, layer_activations: 'torch.Tensor') ->torch.Tensor:
mean = torch.mean(layer_activations, dim=-1, keepdim=True)
var = torch.mean((layer_activations - mean) ** 2, dim=-1, keepdim=True)
return (layer_activations - mean) / torch.sqrt(var + 1e-05)
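# Hedged usage sketch (the input shape is an assumption): normalization runs
# over the last dimension only, so every trailing vector comes out zero-mean
# with unit (biased) variance, up to the 1e-05 epsilon.
def _example_layer_norm():
    x = torch.rand(2, 3, 4)
    out = LayerNorm()(x)
    return out.mean(-1), out.var(-1, unbiased=False)  # ~0 and ~1 per vector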
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_pow_sqrt_sub_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
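    # Each output element reloads its 4-wide row, computes the row mean and
    # biased variance, then normalizes: (x - mean) / sqrt(var + 1e-05).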
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp21 / tmp8
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp26 = tmp10 / tmp25
tl.store(out_ptr0 + x2, tmp26, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_pow_sqrt_sub_0[grid(256)](arg0_1,
buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class LayerNormNew(torch.nn.Module):
"""
A vanilla implementation of layer normalization https://arxiv.org/pdf/1607.06450.pdf
norm_x = (x - mean) / sqrt(mean((x - mean) ^ 2) + 1e-5)
This does not include the trainable parameters gamma and beta, for speed.
Typically, this is norm_x * gamma + beta
"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| netdrones/ml-agents | LayerNorm | false | 7,331 | [
"Apache-2.0"
] | 1 | 7d7d6f149c92ea2067d7cea364d92c8c3b8db3f4 | https://github.com/netdrones/ml-agents/tree/7d7d6f149c92ea2067d7cea364d92c8c3b8db3f4 | import torch
class Model(torch.nn.Module):
"""
A vanilla implementation of layer normalization https://arxiv.org/pdf/1607.06450.pdf
norm_x = (x - mean) / sqrt(mean((x - mean) ^ 2) + 1e-5)
This does not include the trainable parameters gamma and beta, for speed.
Typically, this is norm_x * gamma + beta
"""
def forward(self, layer_activations: 'torch.Tensor') ->torch.Tensor:
mean = torch.mean(layer_activations, dim=-1, keepdim=True)
var = torch.mean((layer_activations - mean) ** 2, dim=-1, keepdim=True)
return (layer_activations - mean) / torch.sqrt(var + 1e-05)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
TokenClassifier | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/nr/cnrkptzsuv7qm3ss6i6xgoxkou23z76h2vmwqkwz2zkgpdbxhedc.py
# Topologically Sorted Source Nodes: [output_states_2], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# output_states_2 => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/32/c32vfxouqe74ea5scuzrdhpd7r6adxwu4bzarm4icjfnb47jbizg.py
# Topologically Sorted Source Nodes: [output_states_2], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# output_states_2 => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_states_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_states_2], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [output_states_2], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
del buf1
return (buf2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def transformer_weights_init(module, std_init_range=0.02, xavier=True):
"""
Initialize different weights in Transformer model.
Args:
module: torch.nn.Module to be initialized
std_init_range: standard deviation of normal initializer
xavier: if True, xavier initializer will be used in Linear layers
as was proposed in AIAYN paper, otherwise normal initializer
will be used (like in BERT paper)
"""
if isinstance(module, nn.Linear):
if xavier:
nn.init.xavier_uniform_(module.weight)
else:
nn.init.normal_(module.weight, mean=0.0, std=std_init_range)
if module.bias is not None:
nn.init.constant_(module.bias, 0.0)
elif isinstance(module, nn.Embedding):
nn.init.normal_(module.weight, mean=0.0, std=std_init_range)
elif isinstance(module, nn.LayerNorm):
nn.init.constant_(module.weight, 1.0)
nn.init.constant_(module.bias, 0.0)
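# Usage sketch (hedged, not from the source): the initializer is applied
# recursively, e.g. model.apply(lambda m: transformer_weights_init(m,
# xavier=False)), re-initializing every Linear/Embedding/LayerNorm submodule.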
class MultiLayerPerceptron(torch.nn.Module):
"""
A simple MLP that can either be used independently or put on top
of pretrained models (such as BERT) and act as a classifier.
Args:
hidden_size (int): the size of each layer
num_classes (int): number of output classes
num_layers (int): number of layers
activation (str): type of activations for layers in between
log_softmax (bool): whether to add a log_softmax layer before output
"""
def __init__(self, hidden_size: 'int', num_classes: 'int', num_layers:
'int'=2, activation: 'str'='relu', log_softmax: 'bool'=True):
super().__init__()
self.layers = 0
activations = {'relu': nn.ReLU(), 'gelu': nn.GELU(), 'sigmoid': nn.
Sigmoid(), 'tanh': nn.Tanh()}
for _ in range(num_layers - 1):
layer = torch.nn.Linear(hidden_size, hidden_size)
setattr(self, f'layer{self.layers}', layer)
setattr(self, f'layer{self.layers + 1}', activations[activation])
self.layers += 2
layer = torch.nn.Linear(hidden_size, num_classes)
setattr(self, f'layer{self.layers}', layer)
self.layers += 1
self.log_softmax = log_softmax
@property
def last_linear_layer(self):
return getattr(self, f'layer{self.layers - 1}')
def forward(self, hidden_states):
output_states = hidden_states[:]
for i in range(self.layers):
output_states = getattr(self, f'layer{i}')(output_states)
if self.log_softmax:
output_states = torch.log_softmax(output_states, dim=-1)
else:
output_states = torch.softmax(output_states, dim=-1)
return output_states
class TokenClassifier(nn.Module):
"""
A module to perform token-level classification tasks such as named entity recognition.
"""
def __init__(self, hidden_size: 'int', num_classes: 'int', num_layers:
'int'=1, activation: 'str'='relu', log_softmax: 'bool'=True,
dropout: 'float'=0.0, use_transformer_init: 'bool'=True) ->None:
"""
Initializes the Token Classifier module.
Args:
hidden_size: the size of the hidden dimension
num_classes: number of classes
num_layers: number of fully connected layers in the multilayer perceptron (MLP)
activation: activation to use between fully connected layers in the MLP
log_softmax: whether to apply log-softmax (rather than softmax) to the output of the MLP
dropout: dropout to apply to the input hidden states
use_transformer_init: whether to initialize the weights of the classifier head with the same approach used in Transformer
"""
super().__init__()
self.log_softmax = log_softmax
self.mlp = MultiLayerPerceptron(hidden_size, num_classes,
num_layers=num_layers, activation=activation, log_softmax=
log_softmax)
self.dropout = nn.Dropout(dropout)
if use_transformer_init:
self.apply(lambda module: transformer_weights_init(module,
xavier=False))
def forward(self, hidden_states):
"""
Performs the forward step of the module.
Args:
hidden_states: batch of hidden states (for example, from the BERT encoder module)
[BATCH_SIZE x SEQ_LENGTH x HIDDEN_SIZE]
Returns: per-token class scores (log-probabilities when log_softmax is True) [BATCH_SIZE x SEQ_LENGTH x NUM_CLASSES]
"""
hidden_states = self.dropout(hidden_states)
logits = self.mlp(hidden_states)
return logits
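# Hedged usage sketch (batch/sequence/hidden sizes are assumptions): with the
# default log_softmax=True the classifier emits per-token log-probabilities.
def _example_token_classifier():
    clf = TokenClassifier(hidden_size=8, num_classes=3)
    hidden = torch.rand(2, 5, 8)  # [BATCH_SIZE x SEQ_LENGTH x HIDDEN_SIZE]
    return clf(hidden)  # [2, 5, 3]; exp() of each row sums to ~1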
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'num_classes': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
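    # Log-softmax pass 1 over the size-4 last dim: subtract the row maximum
    # from each element for numerical stability.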
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
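    # Log-softmax pass 2: subtract log(sum(exp(shifted))) to produce the
    # final log-probabilities.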
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused__log_softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del buf1
return buf2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf2
def transformer_weights_init(module, std_init_range=0.02, xavier=True):
"""
Initialize different weights in Transformer model.
Args:
module: torch.nn.Module to be initialized
std_init_range: standard deviation of normal initializer
xavier: if True, xavier initializer will be used in Linear layers
as was proposed in AIAYN paper, otherwise normal initializer
will be used (like in BERT paper)
"""
if isinstance(module, nn.Linear):
if xavier:
nn.init.xavier_uniform_(module.weight)
else:
nn.init.normal_(module.weight, mean=0.0, std=std_init_range)
if module.bias is not None:
nn.init.constant_(module.bias, 0.0)
elif isinstance(module, nn.Embedding):
nn.init.normal_(module.weight, mean=0.0, std=std_init_range)
elif isinstance(module, nn.LayerNorm):
nn.init.constant_(module.weight, 1.0)
nn.init.constant_(module.bias, 0.0)
class MultiLayerPerceptron(torch.nn.Module):
"""
A simple MLP that can either be used independently or put on top
of pretrained models (such as BERT) and act as a classifier.
Args:
hidden_size (int): the size of each layer
num_classes (int): number of output classes
num_layers (int): number of layers
activation (str): type of activations for layers in between
log_softmax (bool): whether to add a log_softmax layer before output
"""
def __init__(self, hidden_size: 'int', num_classes: 'int', num_layers:
'int'=2, activation: 'str'='relu', log_softmax: 'bool'=True):
super().__init__()
self.layers = 0
activations = {'relu': nn.ReLU(), 'gelu': nn.GELU(), 'sigmoid': nn.
Sigmoid(), 'tanh': nn.Tanh()}
for _ in range(num_layers - 1):
layer = torch.nn.Linear(hidden_size, hidden_size)
setattr(self, f'layer{self.layers}', layer)
setattr(self, f'layer{self.layers + 1}', activations[activation])
self.layers += 2
layer = torch.nn.Linear(hidden_size, num_classes)
setattr(self, f'layer{self.layers}', layer)
self.layers += 1
self.log_softmax = log_softmax
@property
def last_linear_layer(self):
return getattr(self, f'layer{self.layers - 1}')
def forward(self, hidden_states):
output_states = hidden_states[:]
for i in range(self.layers):
output_states = getattr(self, f'layer{i}')(output_states)
if self.log_softmax:
output_states = torch.log_softmax(output_states, dim=-1)
else:
output_states = torch.softmax(output_states, dim=-1)
return output_states
class TokenClassifierNew(nn.Module):
"""
A module to perform token-level classification tasks such as named entity recognition.
"""
def __init__(self, hidden_size: 'int', num_classes: 'int', num_layers:
'int'=1, activation: 'str'='relu', log_softmax: 'bool'=True,
dropout: 'float'=0.0, use_transformer_init: 'bool'=True) ->None:
"""
Initializes the Token Classifier module.
Args:
hidden_size: the size of the hidden dimension
num_classes: number of classes
num_layers: number of fully connected layers in the multilayer perceptron (MLP)
activation: activation to use between fully connected layers in the MLP
log_softmax: whether to apply log-softmax (rather than softmax) to the output of the MLP
dropout: dropout to apply to the input hidden states
use_transformer_init: whether to initialize the weights of the classifier head with the same approach used in Transformer
"""
super().__init__()
self.log_softmax = log_softmax
self.mlp = MultiLayerPerceptron(hidden_size, num_classes,
num_layers=num_layers, activation=activation, log_softmax=
log_softmax)
self.dropout = nn.Dropout(dropout)
if use_transformer_init:
self.apply(lambda module: transformer_weights_init(module,
xavier=False))
def forward(self, input_0):
primals_2 = self.mlp.layer0.weight
primals_3 = self.mlp.layer0.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| ngxingyu/Domain-Transfer-for-Punctuation-Retrieval | TokenClassifier | false | 7,332 | [
"Apache-2.0"
] | 1 | f5aa0ea0946c68aaf7fcf49a5085e6c823766a2f | https://github.com/ngxingyu/Domain-Transfer-for-Punctuation-Retrieval/tree/f5aa0ea0946c68aaf7fcf49a5085e6c823766a2f | import torch
import torch.nn as nn
def transformer_weights_init(module, std_init_range=0.02, xavier=True):
"""
Initialize different weights in Transformer model.
Args:
module: torch.nn.Module to be initialized
std_init_range: standard deviation of normal initializer
xavier: if True, xavier initializer will be used in Linear layers
as was proposed in AIAYN paper, otherwise normal initializer
will be used (like in BERT paper)
"""
if isinstance(module, nn.Linear):
if xavier:
nn.init.xavier_uniform_(module.weight)
else:
nn.init.normal_(module.weight, mean=0.0, std=std_init_range)
if module.bias is not None:
nn.init.constant_(module.bias, 0.0)
elif isinstance(module, nn.Embedding):
nn.init.normal_(module.weight, mean=0.0, std=std_init_range)
elif isinstance(module, nn.LayerNorm):
nn.init.constant_(module.weight, 1.0)
nn.init.constant_(module.bias, 0.0)
class MultiLayerPerceptron(torch.nn.Module):
"""
A simple MLP that can either be used independently or put on top
of pretrained models (such as BERT) and act as a classifier.
Args:
hidden_size (int): the size of each layer
num_classes (int): number of output classes
num_layers (int): number of layers
activation (str): type of activations for layers in between
log_softmax (bool): whether to add a log_softmax layer before output
"""
def __init__(self, hidden_size: 'int', num_classes: 'int', num_layers:
'int'=2, activation: 'str'='relu', log_softmax: 'bool'=True):
super().__init__()
self.layers = 0
activations = {'relu': nn.ReLU(), 'gelu': nn.GELU(), 'sigmoid': nn.
Sigmoid(), 'tanh': nn.Tanh()}
for _ in range(num_layers - 1):
layer = torch.nn.Linear(hidden_size, hidden_size)
setattr(self, f'layer{self.layers}', layer)
setattr(self, f'layer{self.layers + 1}', activations[activation])
self.layers += 2
layer = torch.nn.Linear(hidden_size, num_classes)
setattr(self, f'layer{self.layers}', layer)
self.layers += 1
self.log_softmax = log_softmax
@property
def last_linear_layer(self):
return getattr(self, f'layer{self.layers - 1}')
def forward(self, hidden_states):
output_states = hidden_states[:]
for i in range(self.layers):
output_states = getattr(self, f'layer{i}')(output_states)
if self.log_softmax:
output_states = torch.log_softmax(output_states, dim=-1)
else:
output_states = torch.softmax(output_states, dim=-1)
return output_states
class Model(nn.Module):
"""
A module to perform token-level classification tasks such as named entity recognition.
"""
def __init__(self, hidden_size: 'int', num_classes: 'int', num_layers:
'int'=1, activation: 'str'='relu', log_softmax: 'bool'=True,
dropout: 'float'=0.0, use_transformer_init: 'bool'=True) ->None:
"""
Initializes the Token Classifier module.
Args:
hidden_size: the size of the hidden dimension
num_classes: number of classes
num_layers: number of fully connected layers in the multilayer perceptron (MLP)
activation: activation to use between fully connected layers in the MLP
log_softmax: whether to apply log-softmax (rather than softmax) to the output of the MLP
dropout: dropout to apply to the input hidden states
use_transformer_init: whether to initialize the weights of the classifier head with the same approach used in Transformer
"""
super().__init__()
self.log_softmax = log_softmax
self.mlp = MultiLayerPerceptron(hidden_size, num_classes,
num_layers=num_layers, activation=activation, log_softmax=
log_softmax)
self
# ... truncated (>4000 chars) for memory efficiency |
MultichannelIamge | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3f/c3fzeyeopdwc2ltlmlivpocuknebcj6ej2cz3ueq4yldr3lwgorg.py
# Topologically Sorted Source Nodes: [modulation, x], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# modulation => mul
# x => mul_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, 0.5), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_4), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 16)
x1 = (xindex // 16) % 4
x0 = xindex % 16
x4 = xindex
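    # Fused style modulation (inferred from the graph above): (style @ W.T +
    # bias) is scaled by 0.5 -- the folded self.scale = 1/sqrt(channels_in *
    # k**2) for channels_in=4, k=1 -- and broadcast-multiplied into the input.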
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tl.store(out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/at/catyivxdqgliwunkrygqmudtr3fumc4lgfzxwaogch2z5s2rnn7a.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.add]
# Source node to ATen node mapping:
# out => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %primals_6), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
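# Note (added): this kernel is the in-place bias add `out = conv_out + bias` from
# the module's forward, broadcasting the (1, 4, 1, 1) bias over the (4, 4, 4, 4)
# convolution output via the channel index x1 = (xindex // 16) % 4.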
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [modulation, x], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf0, primals_2, primals_4, buf1, 256, grid=grid(256), stream=stream0)
del buf0
del primals_2
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf3, primals_6, 256, grid=grid(256), stream=stream0)
del primals_6
return (buf3, primals_3, primals_4, primals_5, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class ModulatedConv2d(nn.Module):
def __init__(self, channels_in, channels_out, style_dim, kernel_size,
demodulate=True):
super().__init__()
self.weight = nn.Parameter(torch.randn(channels_out, channels_in,
kernel_size, kernel_size))
self.modulation = nn.Linear(style_dim, channels_in, bias=True)
self.modulation.bias.data.fill_(1.0)
self.demodulate = demodulate
if self.demodulate:
self.register_buffer('style_inv', torch.randn(1, 1, channels_in,
1, 1))
self.scale = 1.0 / math.sqrt(channels_in * kernel_size ** 2)
self.padding = kernel_size // 2
def forward(self, x, style):
modulation = self.get_modulation(style)
x = modulation * x
x = F.conv2d(x, self.weight, padding=self.padding)
if self.demodulate:
demodulation = self.get_demodulation(style)
x = demodulation * x
return x
def get_modulation(self, style):
style = self.modulation(style).view(style.size(0), -1, 1, 1)
modulation = self.scale * style
return modulation
def get_demodulation(self, style):
w = self.weight.unsqueeze(0)
norm = torch.rsqrt((self.scale * self.style_inv * w).pow(2).sum([2,
3, 4]) + 1e-08)
demodulation = norm
return demodulation.view(*demodulation.size(), 1, 1)
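# For reference (comment added): the demodulation above is the StyleGAN2-style
# normalizer
#     demod[c_out] = 1 / sqrt(sum_{c_in, kh, kw} (scale * style_inv * W[c_out])**2 + 1e-8)
# computed against the fixed random `style_inv` buffer rather than a per-sample style.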
class MultichannelIamge(nn.Module):
def __init__(self, channels_in, channels_out, style_dim, kernel_size=1):
super().__init__()
self.conv = ModulatedConv2d(channels_in, channels_out, style_dim,
kernel_size, demodulate=False)
self.bias = nn.Parameter(torch.zeros(1, channels_out, 1, 1))
def forward(self, hidden, style):
out = self.conv(hidden, style)
out = out + self.bias
return out
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'channels_in': 4, 'channels_out': 4, 'style_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 16
x1 = xindex // 16 % 4
x0 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 4),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](buf0, primals_2, primals_4, buf1,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_5, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_add_1[grid(256)](buf3, primals_6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_6
return buf3, primals_3, primals_4, primals_5, buf1
class ModulatedConv2d(nn.Module):
def __init__(self, channels_in, channels_out, style_dim, kernel_size,
demodulate=True):
super().__init__()
self.weight = nn.Parameter(torch.randn(channels_out, channels_in,
kernel_size, kernel_size))
self.modulation = nn.Linear(style_dim, channels_in, bias=True)
self.modulation.bias.data.fill_(1.0)
self.demodulate = demodulate
if self.demodulate:
self.register_buffer('style_inv', torch.randn(1, 1, channels_in,
1, 1))
self.scale = 1.0 / math.sqrt(channels_in * kernel_size ** 2)
self.padding = kernel_size // 2
def forward(self, x, style):
modulation = self.get_modulation(style)
x = modulation * x
x = F.conv2d(x, self.weight, padding=self.padding)
if self.demodulate:
demodulation = self.get_demodulation(style)
x = demodulation * x
return x
def get_modulation(self, style):
style = self.modulation(style).view(style.size(0), -1, 1, 1)
modulation = self.scale * style
return modulation
def get_demodulation(self, style):
w = self.weight.unsqueeze(0)
norm = torch.rsqrt((self.scale * self.style_inv * w).pow(2).sum([2,
3, 4]) + 1e-08)
demodulation = norm
return demodulation.view(*demodulation.size(), 1, 1)
class MultichannelIamgeNew(nn.Module):
def __init__(self, channels_in, channels_out, style_dim, kernel_size=1):
super().__init__()
self.conv = ModulatedConv2d(channels_in, channels_out, style_dim,
kernel_size, demodulate=False)
self.bias = nn.Parameter(torch.zeros(1, channels_out, 1, 1))
def forward(self, input_0, input_1):
primals_6 = self.bias
primals_5 = self.conv.weight
primals_1 = self.conv.modulation.weight
primals_2 = self.conv.modulation.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| nhorton04/mobile_styletransfer | MultichannelIamge | false | 7,333 | [
"Apache-2.0"
] | 1 | db8b9a61b67fd58b9e4d61457ee58e36800cfbbe | https://github.com/nhorton04/mobile_styletransfer/tree/db8b9a61b67fd58b9e4d61457ee58e36800cfbbe | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class ModulatedConv2d(nn.Module):
def __init__(self, channels_in, channels_out, style_dim, kernel_size,
demodulate=True):
super().__init__()
self.weight = nn.Parameter(torch.randn(channels_out, channels_in,
kernel_size, kernel_size))
self.modulation = nn.Linear(style_dim, channels_in, bias=True)
self.modulation.bias.data.fill_(1.0)
self.demodulate = demodulate
if self.demodulate:
self.register_buffer('style_inv', torch.randn(1, 1, channels_in,
1, 1))
self.scale = 1.0 / math.sqrt(channels_in * kernel_size ** 2)
self.padding = kernel_size // 2
def forward(self, x, style):
modulation = self.get_modulation(style)
x = modulation * x
x = F.conv2d(x, self.weight, padding=self.padding)
if self.demodulate:
demodulation = self.get_demodulation(style)
x = demodulation * x
return x
def get_modulation(self, style):
style = self.modulation(style).view(style.size(0), -1, 1, 1)
modulation = self.scale * style
return modulation
def get_demodulation(self, style):
w = self.weight.unsqueeze(0)
norm = torch.rsqrt((self.scale * self.style_inv * w).pow(2).sum([2,
3, 4]) + 1e-08)
demodulation = norm
return demodulation.view(*demodulation.size(), 1, 1)
class Model(nn.Module):
def __init__(self, channels_in, channels_out, style_dim, kernel_size=1):
super().__init__()
self.conv = ModulatedConv2d(channels_in, channels_out, style_dim,
kernel_size, demodulate=False)
self.bias = nn.Parameter(torch.zeros(1, channels_out, 1, 1))
def forward(self, hidden, style):
out = self.conv(hidden, style)
out = out + self.bias
return out
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/md/cmd3ewacyhu5w5hausgbjbmtnt5rr66cgczh4ibdypq7dz6p4v7g.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_4 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
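# Note (added): the "relu + threshold_backward" fusion above applies the bias and
# ReLU in place and, in the same pass, stores the boolean mask that autograd's
# threshold_backward will use to zero incoming gradients. A minimal eager sketch
# of what one such kernel computes (names here are illustrative):
#
#     act = torch.relu(lin_out + bias)   # written back through in_out_ptr0
#     mask = act <= 0                    # written to out_ptr0 for the backward pass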
# kernel path: runs/run_shard_4/inductor_cache/e7/ce7ewq7bv76ie5hdmfxjj46viiuxlajdhtbost7f4gwclfa3hk4i.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_2 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/rh/crhy65nd36tqy72rqqtbfjscgqa26ipbvjxps22h7ynhb26pc4bz.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_3 => relu_2
# Graph fragment:
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/zf/czfbs2xajbtbvyrzhxkchwr5kxngfkmy2bwzhuhrjiyfd6thjazn.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_11, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_11, %amax), kwargs = {})
triton_poi_fused__log_softmax_3 = async_compile.triton('triton_poi_fused__log_softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/t7/ct7hbs7gpwy35jd64a3bsugmje7rjpsv76ux66eunbdu6dinfuun.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_4 = async_compile.triton('triton_poi_fused__log_softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x3), tmp13, xmask)
''', device_str='cuda')
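# Reference sketch (added, not part of the generated module): kernels 3 and 4
# together implement the numerically stable log-softmax over dim=1 in two passes,
#     log_softmax(x)_i = (x_i - m) - log(sum_j exp(x_j - m)),  m = max_j x_j,
# where each group of 4 values sits at stride 16 inside a 64-element batch slab.
def _reference_log_softmax(x):
    # Eager-mode equivalent of the two fused kernels for a (4, 4, 4, 4) input.
    m = x.max(dim=1, keepdim=True).values  # pass 1: amax (kernel 3)
    shifted = x - m
    return shifted - shifted.exp().sum(dim=1, keepdim=True).log()  # pass 2 (kernel 4)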
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (128, 4), (4, 1))
assert_size_stride(primals_3, (128, ), (1, ))
assert_size_stride(primals_4, (256, 128), (128, 1))
assert_size_stride(primals_5, (256, ), (1, ))
assert_size_stride(primals_6, (512, 256), (256, 1))
assert_size_stride(primals_7, (512, ), (1, ))
assert_size_stride(primals_8, (256, 512), (512, 1))
assert_size_stride(primals_9, (256, ), (1, ))
assert_size_stride(primals_10, (128, 256), (256, 1))
assert_size_stride(primals_11, (128, ), (1, ))
assert_size_stride(primals_12, (4, 128), (128, 1))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 128), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse
buf17 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf17, 8192, grid=grid(8192), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 256), (1, 128), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf2 # reuse
buf16 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf16, 16384, grid=grid(16384), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(primals_6, (256, 512), (1, 256), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 512), (8192, 2048, 512, 1), 0); del buf4 # reuse
buf15 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf5, primals_7, buf15, 32768, grid=grid(32768), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf5, (64, 512), (512, 1), 0), reinterpret_tensor(primals_8, (512, 256), (1, 512), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf6 # reuse
buf14 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf7, primals_9, buf14, 16384, grid=grid(16384), stream=stream0)
del primals_9
buf8 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf7, (64, 256), (256, 1), 0), reinterpret_tensor(primals_10, (256, 128), (1, 256), 0), out=buf8)
buf9 = reinterpret_tensor(buf8, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf8 # reuse
buf13 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf9, primals_11, buf13, 8192, grid=grid(8192), stream=stream0)
del primals_11
buf10 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, reinterpret_tensor(buf9, (64, 128), (128, 1), 0), reinterpret_tensor(primals_12, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf10)
del primals_13
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_3.run(buf10, buf11, 256, grid=grid(256), stream=stream0)
buf12 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_4.run(buf11, buf12, 256, grid=grid(256), stream=stream0)
del buf11
return (buf12, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(buf5, (64, 512), (512, 1), 0), reinterpret_tensor(buf7, (64, 256), (256, 1), 0), reinterpret_tensor(buf9, (64, 128), (128, 1), 0), buf12, primals_12, buf13, primals_10, buf14, primals_8, buf15, primals_6, buf16, primals_4, buf17, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((512, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((128, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
class Net(nn.Module):
def __init__(self, input_size, num_classes):
super(Net, self).__init__()
self.linear1 = nn.Linear(input_size, 128)
self.linear2 = nn.Linear(128, 256)
self.linear3 = nn.Linear(256, 512)
self.linear4 = nn.Linear(512, 256)
self.linear5 = nn.Linear(256, 128)
self.linear6 = nn.Linear(128, num_classes)
def forward(self, x):
x = x.float()
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = F.relu(self.linear3(x))
x = F.relu(self.linear4(x))
x = F.relu(self.linear5(x))
x = self.linear6(x)
return F.log_softmax(x, dim=1)
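# Note (added): with a (4, 4, 4, 4) input, dim=1 normalizes each group of four
# channel values; this is exactly the reduction the two _log_softmax kernels
# above implement with their stride-16 loads.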
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'num_classes': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (128, 4), (4, 1))
assert_size_stride(primals_3, (128,), (1,))
assert_size_stride(primals_4, (256, 128), (128, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (512, 256), (256, 1))
assert_size_stride(primals_7, (512,), (1,))
assert_size_stride(primals_8, (256, 512), (512, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (128, 256), (256, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (4, 128), (128, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 128), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf0
buf17 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1,
primals_3, buf17, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_4, (128, 256), (1, 128), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf2
buf16 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(16384)](buf3,
primals_5, buf16, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 512), (512, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_6, (256, 512), (1, 256), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 512), (8192, 2048, 512, 1), 0
)
del buf4
buf15 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(32768)](buf5,
primals_7, buf15, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (64, 512), (512, 1), 0),
reinterpret_tensor(primals_8, (512, 256), (1, 512), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf6
buf14 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(16384)](buf7,
primals_9, buf14, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf8 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_10, (256, 128), (1, 256), 0), out=buf8)
buf9 = reinterpret_tensor(buf8, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf8
buf13 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf9,
primals_11, buf13, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf10 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf9, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_12, (128, 4), (1, 128),
0), alpha=1, beta=1, out=buf10)
del primals_13
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_3[grid(256)](buf10, buf11, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf12 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf10
triton_poi_fused__log_softmax_4[grid(256)](buf11, buf12, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del buf11
return (buf12, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (64, 128), (128, 1), 0),
reinterpret_tensor(buf3, (64, 256), (256, 1), 0),
reinterpret_tensor(buf5, (64, 512), (512, 1), 0),
reinterpret_tensor(buf7, (64, 256), (256, 1), 0),
reinterpret_tensor(buf9, (64, 128), (128, 1), 0), buf12, primals_12,
buf13, primals_10, buf14, primals_8, buf15, primals_6, buf16,
primals_4, buf17)
class NetNew(nn.Module):
def __init__(self, input_size, num_classes):
super(NetNew, self).__init__()
self.linear1 = nn.Linear(input_size, 128)
self.linear2 = nn.Linear(128, 256)
self.linear3 = nn.Linear(256, 512)
self.linear4 = nn.Linear(512, 256)
self.linear5 = nn.Linear(256, 128)
self.linear6 = nn.Linear(128, num_classes)
def forward(self, input_0):
primals_2 = self.linear1.weight
primals_3 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.linear3.weight
primals_7 = self.linear3.bias
primals_8 = self.linear4.weight
primals_9 = self.linear4.bias
primals_10 = self.linear5.weight
primals_11 = self.linear5.bias
primals_12 = self.linear6.weight
primals_13 = self.linear6.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
| nce3xin/spam | Net | false | 7,334 | [
"MIT"
] | 1 | 908421d5cf2dd103e2a7044bf1c8586aaf5f2ada | https://github.com/nce3xin/spam/tree/908421d5cf2dd103e2a7044bf1c8586aaf5f2ada | import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def __init__(self, input_size, num_classes):
super().__init__()
self.linear1 = nn.Linear(input_size, 128)
self.linear2 = nn.Linear(128, 256)
self.linear3 = nn.Linear(256, 512)
self.linear4 = nn.Linear(512, 256)
self.linear5 = nn.Linear(256, 128)
self.linear6 = nn.Linear(128, num_classes)
def forward(self, x):
x = x.float()
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = F.relu(self.linear3(x))
x = F.relu(self.linear4(x))
x = F.relu(self.linear5(x))
x = self.linear6(x)
return F.log_softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ResidualBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/v6/cv6oewqqnsshd7he7ylh2kikzu4smtrhj2dmv6nb5csosp7g6vw5.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# out => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
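# Note (added): the load offset above is reflection padding with pad=1 on a 4x4
# plane, computed arithmetically instead of with branches. Per axis the source
# index is  src = 3 - |3 - |dst - 1||  (e.g. dst=0 -> src=1, dst=5 -> src=2), and
# the expression 15 - |...x0...| - 4*|...x1...| + 16*x2 reduces to the flat offset
# 4*src_row + src_col + 16*plane.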
# kernel path: runs/run_shard_4/inductor_cache/t6/ct6syu6rq3n7yx3zuog2yujcrfreefdccraqz7zj2m3c5xhvp5vl.py
# Topologically Sorted Source Nodes: [out_1, instance_norm], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# instance_norm => add, rsqrt, var_mean
# out_1 => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp23, xmask)
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
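# Reference sketch (added, not part of the generated module): each program above
# normalizes one (batch, channel) plane of 16 pixels, which is how Inductor lowers
# InstanceNorm2d via _native_batch_norm_legit on a (1, N*C, H, W) view.
def _reference_instance_stats(plane):
    # plane: (16,) tensor holding one 4x4 feature map; returns (mean, rsqrt(var + eps)).
    mean = plane.mean()
    var = plane.var(unbiased=False)  # correction=0, matching the graph above
    return mean, torch.rsqrt(var + 1e-05)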
# kernel path: runs/run_shard_4/inductor_cache/3g/c3gbhm3y6wldudvsxdmmjh5ssg2uys5qqk3dd3k7bxnuot4xhndp.py
# Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat]
# Source node to ATen node mapping:
# instance_norm => repeat
# Graph fragment:
# %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_4, [4]), kwargs = {})
triton_poi_fused_repeat_2 = async_compile.triton('triton_poi_fused_repeat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
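# Note (added): `repeat` tiles the InstanceNorm's per-channel affine parameter
# across the batch (4 channels -> 16 values), so each (batch, channel) plane of
# the (1, N*C, H, W) view picks up its own copy of weight/bias.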
# kernel path: runs/run_shard_4/inductor_cache/6x/c6xlvvnj6ftmp7jka4547n3hpffcz5xr3op3wtbpv5povsb6rjue.py
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# out_2 => relu
# out_3 => _unsafe_index_2, _unsafe_index_3
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_relu_3 = async_compile.triton('triton_poi_fused_reflection_pad2d_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
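# --- Illustrative sketch (added for exposition; not emitted by Inductor; the
# helper name is ours). The load index used by both reflection-pad kernels,
# 15 + (-1)*|(-3) + |(-1) + x0|| + (-4)*|(-3) + |(-1) + x1|| + 16*x2, folds
# the 1-pixel ReflectionPad2d into a gather: output coordinate p in [0, 6)
# reads input coordinate 3 - |3 - |p - 1|| in [0, 4).
def _sketch_reflect_index():
    import torch.nn.functional as F
    x = torch.arange(16.0).view(1, 1, 4, 4)
    idx = [3 - abs(3 - abs(p - 1)) for p in range(6)]  # [1, 0, 1, 2, 3, 2]
    gathered = x[0, 0][idx][:, idx]
    assert torch.equal(F.pad(x, (1, 1, 1, 1), mode='reflect')[0, 0], gathered)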
# kernel path: runs/run_shard_4/inductor_cache/2f/c2f3zyag2m4izdxiesppicmusepkhrwzgzb6br4znurpwo5cahc2.py
# Topologically Sorted Source Nodes: [out_4, out_5, out_6, out_7], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_4 => convolution_1
# out_5 => add_2, repeat_2, rsqrt_1, var_mean_1
# out_6 => add_4
# out_7 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_3, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %repeat_2 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_8, [4]), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_4,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_per_fused__native_batch_norm_legit_add_convolution_relu_repeat_threshold_backward_4 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_convolution_relu_repeat_threshold_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*i1', 9: '*fp32', 10: 'i32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_convolution_relu_repeat_threshold_backward_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_relu_repeat_threshold_backward_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3, out_ptr4, out_ptr5, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r3 = rindex
x1 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_out_ptr0 + (r3 + (16*x0)), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr3 + (r3 + (16*x0)), xmask, other=0.0)
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp10 / tmp12
tmp14 = tmp4 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tmp20 = tmp3 - tmp13
tmp21 = 16.0
tmp22 = tmp19 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp26 * tmp0
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp32 = tl.full([1, 1], 0, tl.int32)
tmp33 = triton_helpers.maximum(tmp32, tmp31)
tmp34 = 0.0
tmp35 = tmp33 <= tmp34
tl.store(out_ptr0 + (x0), tmp0, xmask)
tl.store(in_out_ptr0 + (r3 + (16*x0)), tmp3, xmask)
tl.store(out_ptr3 + (r3 + (16*x0)), tmp33, xmask)
tl.store(out_ptr4 + (r3 + (16*x0)), tmp35, xmask)
tl.store(out_ptr5 + (x0), tmp25, xmask)
tl.store(out_ptr1 + (x0), tmp13, xmask)
''', device_str='cuda')
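# --- Illustrative sketch (added for exposition; not emitted by Inductor; the
# helper name is ours). The persistent reduction above fuses the conv bias
# add, the instance-norm statistics (mean and rsqrt(var + 1e-05) over the 16
# spatial positions of each (n, c) slice), the affine transform, the residual
# add, the ReLU, and the `<= 0` mask saved for threshold_backward. An eager
# equivalent of the per-element math:
def _sketch_fused_instance_norm(conv_out, conv_bias, weight, bias, residual):
    x = conv_out + conv_bias.view(1, -1, 1, 1)
    var, mean = torch.var_mean(x, dim=(2, 3), unbiased=False, keepdim=True)
    y = (x - mean) * torch.rsqrt(var + 1e-05)
    out = torch.relu(y * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1) + residual)
    return out, out <= 0  # activation plus the mask consumed by the backward pass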
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf8 = reinterpret_tensor(buf6, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [out_1, instance_norm], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_1.run(buf2, buf8, primals_3, buf5, 16, 16, grid=grid(16), stream=stream0)
del primals_3
buf3 = empty_strided_cuda((16, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat]
triton_poi_fused_repeat_2.run(primals_4, buf3, 16, grid=grid(16), stream=stream0)
del primals_4
buf4 = empty_strided_cuda((16, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat]
triton_poi_fused_repeat_2.run(primals_5, buf4, 16, grid=grid(16), stream=stream0)
del primals_5
buf9 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.relu, aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_relu_3.run(buf2, buf5, buf8, buf3, buf4, buf9, 576, grid=grid(576), stream=stream0)
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1))
buf12 = empty_strided_cuda((16, ), (1, ), torch.float32)
buf11 = buf10; del buf10 # reuse
buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf16 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [out_4, out_5, out_6, out_7], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.add, aten.relu, aten.threshold_backward]
triton_per_fused__native_batch_norm_legit_add_convolution_relu_repeat_threshold_backward_4.run(buf11, primals_8, primals_7, primals_9, primals_1, buf12, buf13, buf17, buf18, buf16, 16, 16, grid=grid(16), stream=stream0)
del primals_1
del primals_7
del primals_8
del primals_9
return (buf17, primals_2, primals_6, buf0, buf2, buf3, buf4, buf5, buf8, buf9, buf11, buf12, reinterpret_tensor(buf16, (16, ), (1, ), 0), buf18, reinterpret_tensor(buf13, (1, 16, 1, 1), (16, 1, 1, 1), 0), )
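# --- Illustrative sketch (added for exposition; not emitted by Inductor; the
# helper name is ours and it needs a CUDA device). buf17 is the module output;
# the other returned tensors are saved for the backward pass. A hedged
# cross-check of call() against the eager ResidualBlock math:
def _sketch_check_against_eager():
    import torch.nn.functional as F
    x = torch.randn(4, 4, 4, 4, device='cuda')
    w1, b1 = torch.randn(4, 4, 3, 3, device='cuda'), torch.randn(4, device='cuda')
    g1, h1 = torch.randn(4, device='cuda'), torch.randn(4, device='cuda')
    w2, b2 = torch.randn(4, 4, 3, 3, device='cuda'), torch.randn(4, device='cuda')
    g2, h2 = torch.randn(4, device='cuda'), torch.randn(4, device='cuda')
    out = call([x, w1, b1, g1, h1, w2, b2, g2, h2])[0]
    y = F.relu(F.instance_norm(F.conv2d(F.pad(x, (1, 1, 1, 1), mode='reflect'), w1, b1), weight=g1, bias=h1))
    y = F.instance_norm(F.conv2d(F.pad(y, (1, 1, 1, 1), mode='reflect'), w2, b2), weight=g2, bias=h2)
    assert torch.allclose(out, F.relu(y + x), atol=1e-4)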
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ConvLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
padding = kernel_size // 2
self.reflection_pad = nn.ReflectionPad2d(padding)
self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
class ResidualBlock(nn.Module):
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in1 = nn.InstanceNorm2d(channels, affine=True)
self.relu = nn.ReLU()
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in2 = nn.InstanceNorm2d(channels, affine=True)
def forward(self, x):
residual = x
out = self.relu(self.in1(self.conv1(x)))
out = self.in2(self.conv2(out))
out = out + residual
out = self.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp23, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_relu_repeat_threshold_backward_4(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1,
out_ptr3, out_ptr4, out_ptr5, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r3 = rindex
x1 = xindex % 4
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_out_ptr0 + (r3 + 16 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr3 + (r3 + 16 * x0), xmask, other=0.0)
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tl.where(xmask, tmp4, 0)
tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp10 / tmp12
tmp14 = tmp4 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tmp20 = tmp3 - tmp13
tmp21 = 16.0
tmp22 = tmp19 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp26 * tmp0
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp32 = tl.full([1, 1], 0, tl.int32)
tmp33 = triton_helpers.maximum(tmp32, tmp31)
tmp34 = 0.0
tmp35 = tmp33 <= tmp34
tl.store(out_ptr0 + x0, tmp0, xmask)
tl.store(in_out_ptr0 + (r3 + 16 * x0), tmp3, xmask)
tl.store(out_ptr3 + (r3 + 16 * x0), tmp33, xmask)
tl.store(out_ptr4 + (r3 + 16 * x0), tmp35, xmask)
tl.store(out_ptr5 + x0, tmp25, xmask)
tl.store(out_ptr1 + x0, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576,
XBLOCK=128, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
        buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf8 = reinterpret_tensor(buf6, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf6
triton_per_fused__native_batch_norm_legit_convolution_1[grid(16)](buf2,
buf8, primals_3, buf5, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((16,), (1,), torch.float32)
triton_poi_fused_repeat_2[grid(16)](primals_4, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((16,), (1,), torch.float32)
triton_poi_fused_repeat_2[grid(16)](primals_5, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf9 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
triton_poi_fused_reflection_pad2d_relu_3[grid(576)](buf2, buf5,
buf8, buf3, buf4, buf9, 576, XBLOCK=128, num_warps=4, num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1))
buf12 = empty_strided_cuda((16,), (1,), torch.float32)
buf11 = buf10
del buf10
        buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        buf16 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
        triton_per_fused__native_batch_norm_legit_add_convolution_relu_repeat_threshold_backward_4[
            grid(16)](buf11, primals_8, primals_7, primals_9, primals_1,
            buf12, buf13, buf17, buf18, buf16, 16, 16, XBLOCK=8,
            num_warps=2, num_stages=1)
del primals_1
del primals_7
del primals_8
del primals_9
return (buf17, primals_2, primals_6, buf0, buf2, buf3, buf4, buf5, buf8,
buf9, buf11, buf12, reinterpret_tensor(buf16, (16,), (1,), 0),
buf18, reinterpret_tensor(buf13, (1, 16, 1, 1), (16, 1, 1, 1), 0))
class ConvLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
padding = kernel_size // 2
self.reflection_pad = nn.ReflectionPad2d(padding)
self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
class ResidualBlockNew(nn.Module):
def __init__(self, channels):
super(ResidualBlockNew, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in1 = nn.InstanceNorm2d(channels, affine=True)
self.relu = nn.ReLU()
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in2 = nn.InstanceNorm2d(channels, affine=True)
def forward(self, input_0):
primals_2 = self.conv1.conv2d.weight
primals_3 = self.conv1.conv2d.bias
primals_4 = self.in1.weight
primals_5 = self.in1.bias
primals_6 = self.conv2.conv2d.weight
primals_7 = self.conv2.conv2d.bias
primals_8 = self.in2.weight
primals_9 = self.in2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| naver-ai/cgl_fairness | ResidualBlock | false | 7,335 | [
"MIT"
] | 1 | 00d3bec233c9b3e0f88496118abaed8321ca3159 | https://github.com/naver-ai/cgl_fairness/tree/00d3bec233c9b3e0f88496118abaed8321ca3159 | import torch
import torch.nn as nn
class ConvLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super().__init__()
padding = kernel_size // 2
self.reflection_pad = nn.ReflectionPad2d(padding)
self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
class Model(nn.Module):
def __init__(self, channels):
super().__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in1 = nn.InstanceNorm2d(channels, affine=True)
self.relu = nn.ReLU()
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in2 = nn.InstanceNorm2d(channels, affine=True)
def forward(self, x):
residual = x
out = self.relu(self.in1(self.conv1(x)))
out = self.in2(self.conv2(out))
out = out + residual
out = self.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
PositionGenerator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/df/cdfcie57v6pcdd6oeaz4mvlgksxgyuxzmlv5bklwemyulqhtcxta.py
# Topologically Sorted Source Nodes: [mean, std, sub, mul, add, truediv, add_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mean => mean
# mul => mul
# std => sqrt, var
# sub => sub
# truediv => div
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_2, [-1], True), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_2, [-1]), kwargs = {correction: 1.0, keepdim: True})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_2, %mean), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %sub), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_4), kwargs = {})
triton_poi_fused_add_div_mean_mul_std_sub_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
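# --- Illustrative sketch (added for exposition; not emitted by Inductor; the
# helper name is ours). The kernel above inlines the custom LayerNorm with
# torch.Tensor.std semantics: the squared-deviation sum is divided by
# N - 1 = 3 (Bessel's correction, unlike nn.LayerNorm), and eps is added to
# the standard deviation rather than to the variance.
def _sketch_layernorm_row(row, a_2, b_2, eps=1e-06):
    mean = row.mean(-1, keepdim=True)
    std = row.std(-1, keepdim=True)  # unbiased; matches the / 3.0 above
    return a_2 * (row - mean) / (std + eps) + b_2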
# kernel path: runs/run_shard_4/inductor_cache/tt/ctttzgnguystw446sgeisgiema54yz5d5wmngsz4pinf7chzr4i7.py
# Topologically Sorted Source Nodes: [out_masked], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# out_masked => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %unsqueeze), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 256
x4 = (xindex // 4)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x5), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (3, 4), (4, 1))
assert_size_stride(primals_6, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, std, sub, mul, add, truediv, add_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0.run(primals_3, primals_2, primals_4, buf0, 256, grid=grid(256), stream=stream0)
del primals_3
del primals_4
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_masked], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(buf0, primals_1, buf1, 1024, grid=grid(1024), stream=stream0)
del buf0
buf2 = empty_strided_cuda((256, 3), (3, 1), torch.float32)
# Topologically Sorted Source Nodes: [projected], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(buf1, (256, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 3), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_6
return (reinterpret_tensor(buf2, (4, 4, 4, 4, 3), (192, 48, 12, 3, 1), 0), primals_1, primals_2, reinterpret_tensor(buf1, (256, 4), (4, 1), 0), primals_5, )
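# --- Illustrative note (added for exposition; not emitted by Inductor; the
# helper name is ours). Both inputs are (4, 4, 4, 4), so mask.unsqueeze(-1)
# broadcasts the masked product up to the 5-D buffer buf1 of shape
# (4, 4, 4, 4, 4); the addmm then flattens it to (256, 4) and projects each
# row onto 3 coordinates, giving the (4, 4, 4, 4, 3) output above.
def _sketch_masked_projection(x_normed, mask, weight, bias):
    out = x_normed * mask.unsqueeze(-1).float()            # (4, 4, 4, 4, 4)
    return torch.nn.functional.linear(out, weight, bias)   # (..., 3)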
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((3, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LayerNorm(nn.Module):
"""Construct a layernorm module (See citation for details)."""
def __init__(self, features, eps=1e-06):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class PositionGenerator(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, d_model):
super(PositionGenerator, self).__init__()
self.norm = LayerNorm(d_model)
self.proj = nn.Linear(d_model, 3)
def forward(self, x, mask):
mask = mask.unsqueeze(-1).float()
out_masked = self.norm(x) * mask
projected = self.proj(out_masked)
return projected
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 256
x4 = xindex // 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x5, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (3, 4), (4, 1))
assert_size_stride(primals_6, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_3,
primals_2, primals_4, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_3
del primals_4
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_mul_1[grid(1024)](buf0, primals_1, buf1, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
del buf0
buf2 = empty_strided_cuda((256, 3), (3, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf1, (256, 4),
(4, 1), 0), reinterpret_tensor(primals_5, (4, 3), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_6
    return (reinterpret_tensor(buf2, (4, 4, 4, 4, 3), (192, 48, 12, 3, 1), 0),
        primals_1, primals_2,
        reinterpret_tensor(buf1, (256, 4), (4, 1), 0), primals_5)
class LayerNorm(nn.Module):
"""Construct a layernorm module (See citation for details)."""
def __init__(self, features, eps=1e-06):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class PositionGeneratorNew(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, d_model):
super(PositionGeneratorNew, self).__init__()
self.norm = LayerNorm(d_model)
self.proj = nn.Linear(d_model, 3)
def forward(self, input_0, input_1):
primals_3 = self.norm.a_2
primals_4 = self.norm.b_2
primals_5 = self.proj.weight
primals_6 = self.proj.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| nigelnnk/MATCh-sensitivity | PositionGenerator | false | 7,336 | [
"MIT"
] | 1 | aaf2b924ac98c8c5925bbf431481724d11a102f8 | https://github.com/nigelnnk/MATCh-sensitivity/tree/aaf2b924ac98c8c5925bbf431481724d11a102f8 | import torch
import torch.nn as nn
class LayerNorm(nn.Module):
"""Construct a layernorm module (See citation for details)."""
def __init__(self, features, eps=1e-06):
super().__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class Model(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, d_model):
super().__init__()
self.norm = LayerNorm(d_model)
self.proj = nn.Linear(d_model, 3)
def forward(self, x, mask):
mask = mask.unsqueeze(-1).float()
out_masked = self.norm(x) * mask
projected = self.proj(out_masked)
return projected
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
EdgeFeaturesLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3u/c3ub52l73zdv4klgqzgxmtzrzxvztuyczv2jksnvrjr7erq7guxd.py
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# linear => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/lx/clxenpwkl4qtcky22cudzrb6ruwgm2vjfzwtegj2siml77dc4lga.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%permute_2,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 1, 4, 1), 0); del buf1 # reuse
buf3 = empty_strided_cuda((4, 1, 4, 4), (16, 1, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, buf3, 64, grid=grid(64), stream=stream0)
return (buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf3, )
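# --- Illustrative sketch (added for exposition; not emitted by Inductor; the
# helper name is ours). The clone kernel materialises permute(0, 2, 3, 1) in
# contiguous memory so the mm can treat the edge-feature dimension as the
# innermost GEMM axis; an eager equivalent of the whole layer:
def _sketch_edge_projection(x, weight):
    p = x.permute(0, 2, 3, 1)                   # (N, H, W, d_edge)
    p = torch.nn.functional.linear(p, weight)   # project d_edge -> 1
    return torch.relu(p.permute(0, 3, 1, 2))    # back to (N, 1, H, W)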
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class EdgeFeaturesLayer(nn.Module):
def __init__(self, d_model, d_edge, h, dropout):
super(EdgeFeaturesLayer, self).__init__()
assert d_model % h == 0
        d_model // h  # per-head width d_k; computed but unused in this layer
self.linear = nn.Linear(d_edge, 1, bias=False)
with torch.no_grad():
self.linear.weight.fill_(0.25)
def forward(self, x):
p_edge = x.permute(0, 2, 3, 1)
p_edge = self.linear(p_edge).permute(0, 3, 1, 2)
return torch.relu(p_edge)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_edge': 4, 'h': 4, 'dropout': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf0, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 1, 4, 1), 0)
del buf1
buf3 = empty_strided_cuda((4, 1, 4, 4), (16, 1, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64)](buf2, buf3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
return buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf3
class EdgeFeaturesLayerNew(nn.Module):
def __init__(self, d_model, d_edge, h, dropout):
super(EdgeFeaturesLayerNew, self).__init__()
assert d_model % h == 0
        d_model // h  # per-head width d_k; computed but unused in this layer
self.linear = nn.Linear(d_edge, 1, bias=False)
with torch.no_grad():
self.linear.weight.fill_(0.25)
def forward(self, input_0):
primals_2 = self.linear.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| nigelnnk/MATCh-sensitivity | EdgeFeaturesLayer | false | 7,337 | [
"MIT"
] | 1 | aaf2b924ac98c8c5925bbf431481724d11a102f8 | https://github.com/nigelnnk/MATCh-sensitivity/tree/aaf2b924ac98c8c5925bbf431481724d11a102f8 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, d_model, d_edge, h, dropout):
super().__init__()
assert d_model % h == 0
        d_model // h  # per-head width d_k; computed but unused in this layer
self.linear = nn.Linear(d_edge, 1, bias=False)
with torch.no_grad():
self.linear.weight.fill_(0.25)
def forward(self, x):
p_edge = x.permute(0, 2, 3, 1)
p_edge = self.linear(p_edge).permute(0, 3, 1, 2)
return torch.relu(p_edge)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4, 0.5]
|
Generator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/pj/cpjtaps4dusmzrrvkgal3gobvvp6pxqqx5zvwpjyzgdjffmdihfr.py
# Topologically Sorted Source Nodes: [out_masked, out_sum, mask_sum, out_avg_pooling], Original ATen: [aten.mul, aten.sum, aten.div]
# Source node to ATen node mapping:
# mask_sum => sum_2
# out_avg_pooling => div
# out_masked => mul
# out_sum => sum_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %unsqueeze), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%unsqueeze, [1]), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_poi_fused_div_mul_sum_0 = async_compile.triton('triton_poi_fused_div_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 64
x1 = (xindex // 4) % 16
x2 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x1 + (64*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x1 + (64*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x1 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = tmp1 + tmp4
tmp16 = tmp15 + tmp8
tmp17 = tmp16 + tmp12
tmp18 = tmp14 / tmp17
tl.store(out_ptr0 + (x4), tmp18, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4), (4, 1))
assert_size_stride(primals_4, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_masked, out_sum, mask_sum, out_avg_pooling], Original ATen: [aten.mul, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_mul_sum_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [projected], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_3
del primals_4
return (reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), )
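# call() returns two tensors: buf2 viewed as the (4, 4, 4, 1) projection (what
# the module's forward exposes as output[0]) and the pooled activations in
# buf0, presumably kept alive for the backward graph paired with this
# '0_forward' AOT ID.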
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
class LayerNorm(nn.Module):
"""Construct a layernorm module (See citation for details)."""
def __init__(self, features, eps=1e-06):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class ScaleNorm(nn.Module):
"""ScaleNorm"""
"""All g’s in SCALE NORM are initialized to sqrt(d)"""
def __init__(self, scale, eps=1e-05):
super(ScaleNorm, self).__init__()
self.scale = nn.Parameter(torch.tensor(math.sqrt(scale)))
self.eps = eps
def forward(self, x):
norm = self.scale / torch.norm(x, dim=-1, keepdim=True).clamp(min=
self.eps)
return x * norm
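# In other words, ScaleNorm computes y = g * x / max(||x||_2, eps) over the
# last dimension, with the learned scalar g initialized to sqrt(d_model).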
class Generator(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, d_model, aggregation_type='mean', n_output=1,
n_layers=1, leaky_relu_slope=0.01, dropout=0.0, scale_norm=False):
super(Generator, self).__init__()
if n_layers == 1:
self.proj = nn.Linear(d_model, n_output)
else:
self.proj = []
for i in range(n_layers - 1):
self.proj.append(nn.Linear(d_model, d_model))
self.proj.append(nn.LeakyReLU(leaky_relu_slope))
self.proj.append(ScaleNorm(d_model) if scale_norm else
LayerNorm(d_model))
self.proj.append(nn.Dropout(dropout))
self.proj.append(nn.Linear(d_model, n_output))
self.proj = torch.nn.Sequential(*self.proj)
self.aggregation_type = aggregation_type
def forward(self, x, mask):
mask = mask.unsqueeze(-1).float()
out_masked = x * mask
if self.aggregation_type == 'mean':
out_sum = out_masked.sum(dim=1)
mask_sum = mask.sum(dim=1)
out_avg_pooling = out_sum / mask_sum
elif self.aggregation_type == 'sum':
out_sum = out_masked.sum(dim=1)
out_avg_pooling = out_sum
elif self.aggregation_type == 'dummy_node':
out_avg_pooling = out_masked[:, 0]
projected = self.proj(out_avg_pooling)
return projected
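# A minimal usage sketch (hypothetical shapes, not taken from this file):
# with x of shape (batch, n_nodes, d_model) and a 0/1 mask of shape
# (batch, n_nodes), masked nodes contribute neither to the sum nor to the
# mean's denominator:
#   gen = Generator(d_model=8)
#   out = gen(torch.rand(2, 5, 8), torch.ones(2, 5))  # -> shape (2, 1)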
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 64
x1 = xindex // 4 % 16
x2 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = tmp1 + tmp4
tmp16 = tmp15 + tmp8
tmp17 = tmp16 + tmp12
tmp18 = tmp14 / tmp17
tl.store(out_ptr0 + x4, tmp18, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4), (4, 1))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_mul_sum_0[grid(256)](primals_2, primals_1,
buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_3, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_3
del primals_4
return reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
class LayerNorm(nn.Module):
"""Construct a layernorm module (See citation for details)."""
def __init__(self, features, eps=1e-06):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class ScaleNorm(nn.Module):
"""ScaleNorm"""
"""All g’s in SCALE NORM are initialized to sqrt(d)"""
def __init__(self, scale, eps=1e-05):
super(ScaleNorm, self).__init__()
self.scale = nn.Parameter(torch.tensor(math.sqrt(scale)))
self.eps = eps
def forward(self, x):
norm = self.scale / torch.norm(x, dim=-1, keepdim=True).clamp(min=
self.eps)
return x * norm
class GeneratorNew(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, d_model, aggregation_type='mean', n_output=1,
n_layers=1, leaky_relu_slope=0.01, dropout=0.0, scale_norm=False):
super(GeneratorNew, self).__init__()
if n_layers == 1:
self.proj = nn.Linear(d_model, n_output)
else:
self.proj = []
for i in range(n_layers - 1):
self.proj.append(nn.Linear(d_model, d_model))
self.proj.append(nn.LeakyReLU(leaky_relu_slope))
self.proj.append(ScaleNorm(d_model) if scale_norm else
LayerNorm(d_model))
self.proj.append(nn.Dropout(dropout))
self.proj.append(nn.Linear(d_model, n_output))
self.proj = torch.nn.Sequential(*self.proj)
self.aggregation_type = aggregation_type
def forward(self, input_0, input_1):
primals_3 = self.proj.weight
primals_4 = self.proj.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| nigelnnk/MATCh-sensitivity | Generator | false | 7338 | ["MIT"] | 1 | aaf2b924ac98c8c5925bbf431481724d11a102f8 | https://github.com/nigelnnk/MATCh-sensitivity/tree/aaf2b924ac98c8c5925bbf431481724d11a102f8 | import math
import torch
import torch.nn as nn
class LayerNorm(nn.Module):
"""Construct a layernorm module (See citation for details)."""
def __init__(self, features, eps=1e-06):
super().__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class ScaleNorm(nn.Module):
"""ScaleNorm"""
"""All g’s in SCALE NORM are initialized to sqrt(d)"""
def __init__(self, scale, eps=1e-05):
super().__init__()
self.scale = nn.Parameter(torch.tensor(math.sqrt(scale)))
self.eps = eps
def forward(self, x):
norm = self.scale / torch.norm(x, dim=-1, keepdim=True).clamp(min=
self.eps)
return x * norm
class Model(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, d_model, aggregation_type='mean', n_output=1,
n_layers=1, leaky_relu_slope=0.01, dropout=0.0, scale_norm=False):
super().__init__()
if n_layers == 1:
self.proj = nn.Linear(d_model, n_output)
else:
self.proj = []
for i in range(n_layers - 1):
self.proj.append(nn.Linear(d_model, d_model))
self.proj.append(nn.LeakyReLU(leaky_relu_slope))
self.proj.append(ScaleNorm(d_model) if scale_norm else
LayerNorm(d_model))
self.proj.append(nn.Dropout(dropout))
self.proj.append(nn.Linear(d_model, n_output))
self.proj = torch.nn.Sequential(*self.proj)
self.aggregation_type = aggregation_type
def forward(self, x, mask):
mask = mask.unsqueeze(-1).float()
out_masked = x * mask
if self.aggregation_type == 'mean':
out_sum = out_masked.sum(dim=1)
mask_sum = mask.sum(dim=1)
out_avg_pooling = out_sum / mask_sum
elif self.aggregation_type == 'sum':
out_sum = out_masked.sum(dim=1)
out_avg_pooling = out_sum
elif self.aggregation_type == 'dummy_node':
out_avg_pooling = out_masked[:, 0]
projected = self.proj(out_avg_pooling)
return projected
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
SqueezeNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ze/czeyd3qjsq546c7ea763ybzbn4sb4zzidmbxe2coosrykwwb4pit.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
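# Note: this kernel (and triton_poi_fused_1..5 below) carries no source
# mapping because Inductor inserts it on its own. Judging from the index
# arithmetic (load at x2 + 49*y3, store at y0 + 3*x2 + 147*y1), it appears to
# repack a (96, 3, 7, 7) convolution weight into a channels-last layout for
# the convolutions that follow; this reading is inferred from the strides,
# not stated in the generated comments.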
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 288
xnumel = 49
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (49*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (147*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/sf/csfphurxkfilliqpsa7cfr3pqkfaef7yr7uzm2nhhxuzpah3kv4x.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1024
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = (yindex // 16)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (16*x2) + (144*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/k6/ck6mpoqrm3een2gnzk3q7avn7if4q5njkh6yuf2lcdtfooev6ukp.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4096
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (288*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/e3/ce3476wuixbg7whdmceldres75gmr262efy4sgv4xcritwmi4xir.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 9216
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 48
y1 = (yindex // 48)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (48*x2) + (432*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/wa/cwasc5xshefzagbizx6nhfjaifdz7vqj4evbpydruvtdugd4lhfp.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16384
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/4j/c4jshfefqkzdtvcidkjhrzjj55ta4bzr5nwbf2nuzdoc75mmiayw.py
# Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_1 => convolution
# input_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
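# A rough eager-mode equivalent of the fusion above, inferred from the graph
# fragment (stride (2, 2), no padding, bias folded into the ReLU epilogue):
#   out = torch.relu(torch.nn.functional.conv2d(x, weight, bias, stride=2))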
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 322944
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 96
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hn/chn7w4dttiseahpxxhjjrfcqpaj5jhrmdzbhyzmmkrkwf57xro7q.py
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# input_3 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
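# A rough eager-mode equivalent (a sketch): a 3x3, stride-2 max pool that also
# records which of the nine window positions won,
#   out, idx = torch.nn.functional.max_pool2d(
#       x, kernel_size=3, stride=2, return_indices=True)
# except that out_ptr1 below holds a compact int8 offset (0..8) within the
# window rather than a flat input index.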
triton_poi_fused_max_pool2d_with_indices_7 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 75264
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 96
x1 = (xindex // 96) % 14
x2 = (xindex // 1344) % 14
x3 = (xindex // 18816)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (192*x1) + (5568*x2) + (80736*x3)), xmask)
tmp1 = tl.load(in_ptr0 + (96 + x0 + (192*x1) + (5568*x2) + (80736*x3)), xmask)
tmp3 = tl.load(in_ptr0 + (192 + x0 + (192*x1) + (5568*x2) + (80736*x3)), xmask)
tmp5 = tl.load(in_ptr0 + (2784 + x0 + (192*x1) + (5568*x2) + (80736*x3)), xmask)
tmp7 = tl.load(in_ptr0 + (2880 + x0 + (192*x1) + (5568*x2) + (80736*x3)), xmask)
tmp9 = tl.load(in_ptr0 + (2976 + x0 + (192*x1) + (5568*x2) + (80736*x3)), xmask)
tmp11 = tl.load(in_ptr0 + (5568 + x0 + (192*x1) + (5568*x2) + (80736*x3)), xmask)
tmp13 = tl.load(in_ptr0 + (5664 + x0 + (192*x1) + (5568*x2) + (80736*x3)), xmask)
tmp15 = tl.load(in_ptr0 + (5760 + x0 + (192*x1) + (5568*x2) + (80736*x3)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + (x4), tmp16, xmask)
tl.store(out_ptr1 + (x4), tmp41, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/n2/cn2zhaw5q6d3p7elstsunhq6ybcjaqtshcx4id6mvj7r7pp6muc7.py
# Topologically Sorted Source Nodes: [conv2d_1, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12544
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/56/c56yi7b4gudcyxqcuy62nlzwkpzyvbfci2mdojw3gbunflpxfmwb.py
# Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# input_4 => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_2, %relu_3], 1), kwargs = {})
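# In SqueezeNet terms this is the channel concatenation of a Fire module's
# 1x1 and 3x3 expand branches: the kernel folds each branch's bias add and
# ReLU into the copy and selects the source by output channel (0..63 from the
# first branch, 64..127 from the second). A rough eager sketch:
#   out = torch.cat([torch.relu(expand1x1), torch.relu(expand3x3)], dim=1)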
triton_poi_fused_cat_9 = async_compile.triton('triton_poi_fused_cat_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 100352
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = (xindex // 128)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((64*x1) + x0), tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x0), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 128, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr2 + ((64*x1) + ((-64) + x0)), tmp12, eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr3 + ((-64) + x0), tmp12, eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tl.store(out_ptr0 + (x2), tmp21, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qe/cqeo4l5igb7ssqpg4qcf256ohiqzstzkbcwi5m3qi4a33t2cbk6c.py
# Topologically Sorted Source Nodes: [conv2d_7, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_7 => convolution_7
# x_2 => relu_7
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_16, %primals_17, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_7 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {})
triton_poi_fused_convolution_relu_10 = async_compile.triton('triton_poi_fused_convolution_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 25088
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/fj/cfj6u5lcpgelhrmdirkhqtdee5oy6idtzgzhl5xjlut6mwmcq5ez.py
# Topologically Sorted Source Nodes: [input_6], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# input_6 => cat_2
# Graph fragment:
# %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_8, %relu_9], 1), kwargs = {})
triton_poi_fused_cat_11 = async_compile.triton('triton_poi_fused_cat_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 200704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = (xindex // 256)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((128*x1) + x0), tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x0), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 256, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr2 + ((128*x1) + ((-128) + x0)), tmp12, eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr3 + ((-128) + x0), tmp12, eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tl.store(out_ptr0 + (x2), tmp21, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/l3/cl3lx4bd6l5at6p2m7izsqz2tw7jh6dw7dimwozxxwufrimtfiqz.py
# Topologically Sorted Source Nodes: [input_7], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# input_7 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
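# Unlike triton_poi_fused_max_pool2d_with_indices_7 above, this variant guards
# every window tap with explicit bounds checks and substitutes -inf for
# out-of-range positions: the 3x3, stride-2 window steps past the 14x14 input
# while producing a 7x7 output, which is consistent with a ceil_mode=True
# max pool.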
triton_poi_fused_max_pool2d_with_indices_12 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 1792) % 7
x1 = (xindex // 256) % 7
x0 = xindex % 256
x5 = (xindex // 1792)
x6 = xindex
tmp0 = 2*x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 14, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 2*x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (x0 + (512*x1) + (7168*x5)), tmp10 & xmask, other=float("-inf"))
tmp12 = 1 + (2*x1)
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (7168*x5)), tmp16 & xmask, other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 2 + (2*x1)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (512 + x0 + (512*x1) + (7168*x5)), tmp23 & xmask, other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 1 + (2*x2)
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (3584 + x0 + (512*x1) + (7168*x5)), tmp30 & xmask, other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (3840 + x0 + (512*x1) + (7168*x5)), tmp33 & xmask, other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (4096 + x0 + (512*x1) + (7168*x5)), tmp36 & xmask, other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 2 + (2*x2)
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (7168 + x0 + (512*x1) + (7168*x5)), tmp43 & xmask, other=float("-inf"))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (7424 + x0 + (512*x1) + (7168*x5)), tmp46 & xmask, other=float("-inf"))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (7680 + x0 + (512*x1) + (7168*x5)), tmp49 & xmask, other=float("-inf"))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + (x6), tmp51, xmask)
tl.store(out_ptr1 + (x6), tmp76, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/my/cmyhxeiv3z2ual3ueuhemlx25ba554moz6ij24ubkagg3qxd2jmk.py
# Topologically Sorted Source Nodes: [conv2d_10, x_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_10 => convolution_10
# x_3 => relu_10
# Graph fragment:
# %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_22, %primals_23, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_10 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_10,), kwargs = {})
triton_poi_fused_convolution_relu_13 = async_compile.triton('triton_poi_fused_convolution_relu_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/b2/cb2xdk2o6mgvhtsoy5gptyuqevcymafmzpecvobhsolx4covjirs.py
# Topologically Sorted Source Nodes: [input_8], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# input_8 => cat_3
# Graph fragment:
# %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_11, %relu_12], 1), kwargs = {})
triton_poi_fused_cat_14 = async_compile.triton('triton_poi_fused_cat_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_14(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x1 = (xindex // 256)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((128*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 256, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr2 + ((128*x1) + ((-128) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr3 + ((-128) + x0), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/pa/cpazhbfndas4amtcw2kihst5qrnlydkbieelfyb5h22pw4z5wwqp.py
# Topologically Sorted Source Nodes: [conv2d_13, x_4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_13 => convolution_13
# x_4 => relu_13
# Graph fragment:
# %convolution_13 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_3, %primals_28, %primals_29, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_13 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_13,), kwargs = {})
triton_poi_fused_convolution_relu_15 = async_compile.triton('triton_poi_fused_convolution_relu_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 9408
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 48
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
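# --- Illustrative reference (not part of the generated output) ---
# triton_poi_fused_convolution_relu_15 is the epilogue for an extern
# convolution launched with bias=None: it adds the bias and applies ReLU in
# place (note 'mutated_arg_names': ['in_out_ptr0'] in the metadata above).
# A hedged eager-mode equivalent, with a hypothetical name:
def _ref_bias_relu_inplace(conv_out, bias):
    # conv_out: (N, C, H, W) conv result without bias; bias: (C,)
    return conv_out.add_(bias.view(1, -1, 1, 1)).relu_()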
# kernel path: runs/run_shard_4/inductor_cache/wb/cwbn5mqw2v2zzldjs4ac7oua67izn3hsaqm62k4yghm3yuppsilx.py
# Topologically Sorted Source Nodes: [input_9], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# input_9 => cat_4
# Graph fragment:
# %cat_4 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_14, %relu_15], 1), kwargs = {})
triton_poi_fused_cat_16 = async_compile.triton('triton_poi_fused_cat_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 75264
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 384
x1 = (xindex // 384)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 192, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((192*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 384, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr2 + ((192*x1) + ((-192) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr3 + ((-192) + x0), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/5c/c5cxr2sgesuh3ykfbyxbkhz6ci32ejiuiodkmjw2os5hw3xwjxoh.py
# Topologically Sorted Source Nodes: [conv2d_19, x_6], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_19 => convolution_19
# x_6 => relu_19
# Graph fragment:
# %convolution_19 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_5, %primals_40, %primals_41, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_19 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_19,), kwargs = {})
triton_poi_fused_convolution_relu_17 = async_compile.triton('triton_poi_fused_convolution_relu_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_17', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12544
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/go/cgog67rtzwolkpyjsykd37vy6ax6lzk7vkr6q33ga6ewlzj7xpgx.py
# Topologically Sorted Source Nodes: [input_11], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# input_11 => cat_6
# Graph fragment:
# %cat_6 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_20, %relu_21], 1), kwargs = {})
triton_poi_fused_cat_18 = async_compile.triton('triton_poi_fused_cat_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_18(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 100352
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 512
x1 = (xindex // 512)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((256*x1) + x0), tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x0), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 512, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr2 + ((256*x1) + ((-256) + x0)), tmp12, eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr3 + ((-256) + x0), tmp12, eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tl.store(out_ptr0 + (x2), tmp21, None)
''', device_str='cuda')
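# Note on masking: unlike triton_poi_fused_cat_16 above, this kernel sets
# xmask to a constant-true vector instead of `xindex < xnumel`. Inductor
# presumably proved that xnumel (100352) divides evenly into every candidate
# launch size, so the tail bounds check is elided; only the cat branch
# predicates (tmp4 / tmp12) remain as load masks.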
# kernel path: runs/run_shard_4/inductor_cache/pp/cppl4cftexd7sjwxqm6twssr34eku3qwgtvw3vck2g5ezk5nery6.py
# Topologically Sorted Source Nodes: [input_12], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# input_12 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_19 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_19(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 18432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 512
x1 = (xindex // 512) % 3
x2 = (xindex // 1536) % 3
x3 = (xindex // 4608)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (1024*x1) + (7168*x2) + (25088*x3)), None)
tmp1 = tl.load(in_ptr0 + (512 + x0 + (1024*x1) + (7168*x2) + (25088*x3)), None)
tmp3 = tl.load(in_ptr0 + (1024 + x0 + (1024*x1) + (7168*x2) + (25088*x3)), None)
tmp5 = tl.load(in_ptr0 + (3584 + x0 + (1024*x1) + (7168*x2) + (25088*x3)), None)
tmp7 = tl.load(in_ptr0 + (4096 + x0 + (1024*x1) + (7168*x2) + (25088*x3)), None)
tmp9 = tl.load(in_ptr0 + (4608 + x0 + (1024*x1) + (7168*x2) + (25088*x3)), None)
tmp11 = tl.load(in_ptr0 + (7168 + x0 + (1024*x1) + (7168*x2) + (25088*x3)), None)
tmp13 = tl.load(in_ptr0 + (7680 + x0 + (1024*x1) + (7168*x2) + (25088*x3)), None)
tmp15 = tl.load(in_ptr0 + (8192 + x0 + (1024*x1) + (7168*x2) + (25088*x3)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + (x4), tmp16, None)
tl.store(out_ptr1 + (x4), tmp41, None)
''', device_str='cuda')
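# --- Illustrative reference (not part of the generated output) ---
# triton_poi_fused_max_pool2d_with_indices_19 pools a (4, 512, 7, 7)
# channels-last buffer with a 3x3 window at stride 2, producing 3x3 outputs
# plus an int8 index map. One caveat: the kernel stores the *local* argmax
# offset within each 3x3 window (0..8), whereas the eager sketch below
# returns flattened per-channel global indices. Hypothetical name, for
# illustration only:
def _ref_maxpool_3x3_s2_with_indices(x):
    # x: (N, 512, 7, 7) float32 -> values (N, 512, 3, 3), indices (N, 512, 3, 3)
    return torch.nn.functional.max_pool2d(
        x, kernel_size=3, stride=2, return_indices=True
    )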
# kernel path: runs/run_shard_4/inductor_cache/45/c45jgdzesyuhk6beyhfbmirm3tamhabwhi5pwvke7xes3jeijxrf.py
# Topologically Sorted Source Nodes: [conv2d_22, x_7], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_22 => convolution_22
# x_7 => relu_22
# Graph fragment:
# %convolution_22 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_46, %primals_47, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_22 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_22,), kwargs = {})
triton_poi_fused_convolution_relu_20 = async_compile.triton('triton_poi_fused_convolution_relu_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_20', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_20(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/5r/c5rz37wlpjddqpmthp3dlgcpqap4tjp7gah53ngg3v3clqnewuwd.py
# Topologically Sorted Source Nodes: [input_13], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# input_13 => cat_7
# Graph fragment:
# %cat_7 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_23, %relu_24], 1), kwargs = {})
triton_poi_fused_cat_21 = async_compile.triton('triton_poi_fused_cat_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_21(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 512
x1 = (xindex // 512)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((256*x1) + x0), tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x0), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 512, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr2 + ((256*x1) + ((-256) + x0)), tmp12, eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr3 + ((-256) + x0), tmp12, eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tl.store(out_ptr0 + (x2), tmp21, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/mg/cmgqvp7lrzfft3pjfzc6h6aiqy7lluxn5tlkddijz66oiza6wngk.py
# Topologically Sorted Source Nodes: [input_15, input_16, input_17], Original ATen: [aten.convolution, aten.relu, aten.mean]
# Source node to ATen node mapping:
# input_15 => convolution_25
# input_16 => relu_25
# input_17 => mean
# Graph fragment:
# %convolution_25 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_7, %primals_52, %primals_53, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_25 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_25,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%relu_25, [-1, -2], True), kwargs = {})
triton_per_fused_convolution_mean_relu_22 = async_compile.triton('triton_per_fused_convolution_mean_relu_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4096, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_convolution_mean_relu_22', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_convolution_mean_relu_22(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4000
rnumel = 9
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r2 = rindex
x0 = xindex % 1000
x1 = (xindex // 1000)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (1000*r2) + (9000*x1)), rmask & xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.where(rmask & xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = 9.0
tmp10 = tmp8 / tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
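# --- Illustrative reference (not part of the generated output) ---
# triton_per_fused_convolution_mean_relu_22 is a persistent reduction that
# fuses the classifier epilogue: add the 1000-way bias, apply ReLU, then
# global-average-pool the 3x3 spatial extent (rnumel = 9, hence the division
# by 9.0). A hedged eager-mode sketch with a hypothetical name:
def _ref_bias_relu_global_avg_pool(conv_out, bias):
    # conv_out: (N, 1000, 3, 3) without bias; bias: (1000,)
    y = torch.relu(conv_out + bias.view(1, -1, 1, 1))
    return y.mean(dim=(-1, -2), keepdim=True)  # (N, 1000, 1, 1)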
# kernel path: runs/run_shard_4/inductor_cache/u7/cu7dpusjnm7yt3b5etrxmm4vvsl2ugywwulnxuijjlly4x34mfz4.py
# Topologically Sorted Source Nodes: [input_15, input_16], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# input_15 => convolution_25
# input_16 => relu_25
# Graph fragment:
# %convolution_25 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_7, %primals_52, %primals_53, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_25 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_25,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_25, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_23 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_23(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 36000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 1000
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
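# --- Illustrative reference (not part of the generated output) ---
# The *_threshold_backward_* kernels (23 through 29 below) all follow one
# pattern: recompute relu(conv + bias) and record a boolean mask of the
# positions that ended up <= 0. Autograd's ReLU backward
# (aten.threshold_backward) uses this mask to zero the incoming gradient.
# A hedged eager-mode sketch with a hypothetical name:
def _ref_relu_backward_mask(conv_out, bias):
    # conv_out: (N, C, H, W) without bias; bias: (C,) -> bool mask
    return torch.relu(conv_out + bias.view(1, -1, 1, 1)) <= 0.0
# Kernels 24-29 repeat the same computation at the other fused conv+relu
# sites, differing only in xnumel and channel count.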
# kernel path: runs/run_shard_4/inductor_cache/3q/c3qoii36do2jtam7m3jhlytuykkypmcqmr7twk4w5stkyslqhvdx.py
# Topologically Sorted Source Nodes: [conv2d_24, relu_24], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_24 => convolution_24
# relu_24 => relu_24
# Graph fragment:
# %convolution_24 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_22, %primals_50, %primals_51, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_24 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_24,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_24, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_24 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_24', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_24(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ll/cllrnxsks4gdn3w6tf5gxd2u2stzyjmsqql4o7rrn4l4ga7j2fmq.py
# Topologically Sorted Source Nodes: [conv2d_21, relu_21], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_21 => convolution_21
# relu_21 => relu_21
# Graph fragment:
# %convolution_21 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_19, %primals_44, %primals_45, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_21 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_21,), kwargs = {})
# %le_4 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_21, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_25 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_25', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_25', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_25(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hx/chx7vabdpspv3vt5tkxlo5dyphcmzc6xudpgw3n4harfu77ycpw5.py
# Topologically Sorted Source Nodes: [conv2d_18, relu_18], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_18 => convolution_18
# relu_18 => relu_18
# Graph fragment:
# %convolution_18 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_16, %primals_38, %primals_39, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_18 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_18,), kwargs = {})
# %le_7 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_18, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_26 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_26', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_26', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_26(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 37632
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/t2/ct22ibtznsxt52xgmjekx6wdrldib6bgixp2xemxlkpijdgasznb.py
# Topologically Sorted Source Nodes: [conv2d_12, relu_12], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_12 => convolution_12
# relu_12 => relu_12
# Graph fragment:
# %convolution_12 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_10, %primals_26, %primals_27, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_12 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_12,), kwargs = {})
# %le_13 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_12, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_27 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_27', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_27', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_27(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 25088
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vf/cvfcywcoir6zz63sb5ykrqafgtqsyn4rbib4xawb7ugv3tk5pirj.py
# Topologically Sorted Source Nodes: [conv2d_9, relu_9], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_9 => convolution_9
# relu_9 => relu_9
# Graph fragment:
# %convolution_9 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_7, %primals_20, %primals_21, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_9 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_9,), kwargs = {})
# %le_16 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_9, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_28 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_28', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_28', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_28(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 100352
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/o6/co6j4d6e2424ff7a23ugfqqsp3f7dkkmpauq2qrab36olqyt25hs.py
# Topologically Sorted Source Nodes: [conv2d_6, relu_6], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_6 => convolution_6
# relu_6 => relu_6
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_4, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {})
# %le_19 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_6, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_29 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_29', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_29', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_29(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
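# async_compile.wait(globals()) blocks until every Triton kernel defined in
# this module has finished compiling and resolves the compiled kernels into
# the module globals; the helper is then dropped, since call() below only
# launches the already-compiled kernels.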
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53 = args
args.clear()
assert_size_stride(primals_1, (96, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_2, (96, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 96, 1, 1), (96, 1, 1, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (64, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (64, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (64, ), (1, ))
assert_size_stride(primals_10, (16, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_11, (16, ), (1, ))
assert_size_stride(primals_12, (64, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_13, (64, ), (1, ))
assert_size_stride(primals_14, (64, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_15, (64, ), (1, ))
assert_size_stride(primals_16, (32, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_17, (32, ), (1, ))
assert_size_stride(primals_18, (128, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_19, (128, ), (1, ))
assert_size_stride(primals_20, (128, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_21, (128, ), (1, ))
assert_size_stride(primals_22, (32, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_23, (32, ), (1, ))
assert_size_stride(primals_24, (128, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_25, (128, ), (1, ))
assert_size_stride(primals_26, (128, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_27, (128, ), (1, ))
assert_size_stride(primals_28, (48, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_29, (48, ), (1, ))
assert_size_stride(primals_30, (192, 48, 1, 1), (48, 1, 1, 1))
assert_size_stride(primals_31, (192, ), (1, ))
assert_size_stride(primals_32, (192, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_33, (192, ), (1, ))
assert_size_stride(primals_34, (48, 384, 1, 1), (384, 1, 1, 1))
assert_size_stride(primals_35, (48, ), (1, ))
assert_size_stride(primals_36, (192, 48, 1, 1), (48, 1, 1, 1))
assert_size_stride(primals_37, (192, ), (1, ))
assert_size_stride(primals_38, (192, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_39, (192, ), (1, ))
assert_size_stride(primals_40, (64, 384, 1, 1), (384, 1, 1, 1))
assert_size_stride(primals_41, (64, ), (1, ))
assert_size_stride(primals_42, (256, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_43, (256, ), (1, ))
assert_size_stride(primals_44, (256, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_45, (256, ), (1, ))
assert_size_stride(primals_46, (64, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_47, (64, ), (1, ))
assert_size_stride(primals_48, (256, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_49, (256, ), (1, ))
assert_size_stride(primals_50, (256, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_51, (256, ), (1, ))
assert_size_stride(primals_52, (1000, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_53, (1000, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
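        # buf0-buf9 below repack the 3x3 conv weights (and the input) from
        # contiguous NCHW strides into channels-last strides, e.g. primals_1
        # goes from strides (147, 49, 7, 1) to (147, 1, 21, 3), so that the
        # extern convolutions and the pointwise Triton kernels all operate
        # on the same memory format.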
buf0 = empty_strided_cuda((96, 3, 7, 7), (147, 1, 21, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 288, 49, grid=grid(288, 49), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 16, 3, 3), (144, 1, 48, 16), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_8, buf2, 1024, 9, grid=grid(1024, 9), stream=stream0)
del primals_8
buf3 = empty_strided_cuda((64, 16, 3, 3), (144, 1, 48, 16), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_14, buf3, 1024, 9, grid=grid(1024, 9), stream=stream0)
del primals_14
buf4 = empty_strided_cuda((128, 32, 3, 3), (288, 1, 96, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_20, buf4, 4096, 9, grid=grid(4096, 9), stream=stream0)
del primals_20
buf5 = empty_strided_cuda((128, 32, 3, 3), (288, 1, 96, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_26, buf5, 4096, 9, grid=grid(4096, 9), stream=stream0)
del primals_26
buf6 = empty_strided_cuda((192, 48, 3, 3), (432, 1, 144, 48), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_32, buf6, 9216, 9, grid=grid(9216, 9), stream=stream0)
del primals_32
buf7 = empty_strided_cuda((192, 48, 3, 3), (432, 1, 144, 48), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_38, buf7, 9216, 9, grid=grid(9216, 9), stream=stream0)
del primals_38
buf8 = empty_strided_cuda((256, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_44, buf8, 16384, 9, grid=grid(16384, 9), stream=stream0)
del primals_44
buf9 = empty_strided_cuda((256, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_50, buf9, 16384, 9, grid=grid(16384, 9), stream=stream0)
del primals_50
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 96, 29, 29), (80736, 1, 2784, 96))
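        # Note: the extern convolution is launched with bias=None; the bias
        # (primals_2) is instead added by the fused ReLU epilogue kernel on
        # the next line, which mutates the conv output buffer in place.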
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf11, primals_2, 322944, grid=grid(322944), stream=stream0)
del primals_2
buf12 = empty_strided_cuda((4, 96, 14, 14), (18816, 1, 1344, 96), torch.float32)
buf13 = empty_strided_cuda((4, 96, 14, 14), (18816, 1, 1344, 96), torch.int8)
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_7.run(buf11, buf12, buf13, 75264, grid=grid(75264), stream=stream0)
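        # The next block is a squeeze/expand unit (the same shape pattern as
        # a SqueezeNet-style "Fire" module): a 1x1 squeeze conv (96 -> 16
        # channels), then parallel 1x1 and 3x3 expand convs (16 -> 64 each)
        # whose bias+ReLU outputs triton_poi_fused_cat_9 concatenates into
        # 128 channels. The same pattern repeats through input_13 below.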
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf12, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 16, 14, 14), (3136, 1, 224, 16))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf15, primals_5, 12544, grid=grid(12544), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 64, 14, 14), (12544, 1, 896, 64))
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf17 = extern_kernels.convolution(buf15, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 64, 14, 14), (12544, 1, 896, 64))
buf18 = empty_strided_cuda((4, 128, 14, 14), (25088, 1, 1792, 128), torch.float32)
# Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.cat]
triton_poi_fused_cat_9.run(buf16, primals_7, buf17, primals_9, buf18, 100352, grid=grid(100352), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf19 = extern_kernels.convolution(buf18, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 16, 14, 14), (3136, 1, 224, 16))
buf20 = buf19; del buf19 # reuse
# Topologically Sorted Source Nodes: [conv2d_4, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf20, primals_11, 12544, grid=grid(12544), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf21 = extern_kernels.convolution(buf20, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 64, 14, 14), (12544, 1, 896, 64))
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf22 = extern_kernels.convolution(buf20, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 64, 14, 14), (12544, 1, 896, 64))
buf23 = empty_strided_cuda((4, 128, 14, 14), (25088, 1, 1792, 128), torch.float32)
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.cat]
triton_poi_fused_cat_9.run(buf21, primals_13, buf22, primals_15, buf23, 100352, grid=grid(100352), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf23, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 32, 14, 14), (6272, 1, 448, 32))
buf25 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [conv2d_7, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_10.run(buf25, primals_17, 25088, grid=grid(25088), stream=stream0)
del primals_17
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
buf26 = extern_kernels.convolution(buf25, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 128, 14, 14), (25088, 1, 1792, 128))
# Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution]
buf27 = extern_kernels.convolution(buf25, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 128, 14, 14), (25088, 1, 1792, 128))
buf28 = empty_strided_cuda((4, 256, 14, 14), (50176, 1, 3584, 256), torch.float32)
# Topologically Sorted Source Nodes: [input_6], Original ATen: [aten.cat]
triton_poi_fused_cat_11.run(buf26, primals_19, buf27, primals_21, buf28, 200704, grid=grid(200704), stream=stream0)
buf29 = empty_strided_cuda((4, 256, 7, 7), (12544, 1, 1792, 256), torch.float32)
buf30 = empty_strided_cuda((4, 256, 7, 7), (12544, 1, 1792, 256), torch.int8)
# Topologically Sorted Source Nodes: [input_7], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_12.run(buf28, buf29, buf30, 50176, grid=grid(50176), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution]
buf31 = extern_kernels.convolution(buf29, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf31, (4, 32, 7, 7), (1568, 1, 224, 32))
buf32 = buf31; del buf31 # reuse
# Topologically Sorted Source Nodes: [conv2d_10, x_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_13.run(buf32, primals_23, 6272, grid=grid(6272), stream=stream0)
del primals_23
# Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution]
buf33 = extern_kernels.convolution(buf32, primals_24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 128, 7, 7), (6272, 1, 896, 128))
# Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution]
buf34 = extern_kernels.convolution(buf32, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 128, 7, 7), (6272, 1, 896, 128))
buf35 = empty_strided_cuda((4, 256, 7, 7), (12544, 1, 1792, 256), torch.float32)
# Topologically Sorted Source Nodes: [input_8], Original ATen: [aten.cat]
triton_poi_fused_cat_14.run(buf33, primals_25, buf34, primals_27, buf35, 50176, grid=grid(50176), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution]
buf36 = extern_kernels.convolution(buf35, primals_28, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 48, 7, 7), (2352, 1, 336, 48))
buf37 = buf36; del buf36 # reuse
# Topologically Sorted Source Nodes: [conv2d_13, x_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_15.run(buf37, primals_29, 9408, grid=grid(9408), stream=stream0)
del primals_29
# Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution]
buf38 = extern_kernels.convolution(buf37, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 192, 7, 7), (9408, 1, 1344, 192))
# Topologically Sorted Source Nodes: [conv2d_15], Original ATen: [aten.convolution]
buf39 = extern_kernels.convolution(buf37, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 192, 7, 7), (9408, 1, 1344, 192))
buf40 = empty_strided_cuda((4, 384, 7, 7), (18816, 1, 2688, 384), torch.float32)
# Topologically Sorted Source Nodes: [input_9], Original ATen: [aten.cat]
triton_poi_fused_cat_16.run(buf38, primals_31, buf39, primals_33, buf40, 75264, grid=grid(75264), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_16], Original ATen: [aten.convolution]
buf41 = extern_kernels.convolution(buf40, primals_34, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf41, (4, 48, 7, 7), (2352, 1, 336, 48))
buf42 = buf41; del buf41 # reuse
# Topologically Sorted Source Nodes: [conv2d_16, x_5], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_15.run(buf42, primals_35, 9408, grid=grid(9408), stream=stream0)
del primals_35
# Topologically Sorted Source Nodes: [conv2d_17], Original ATen: [aten.convolution]
buf43 = extern_kernels.convolution(buf42, primals_36, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf43, (4, 192, 7, 7), (9408, 1, 1344, 192))
# Topologically Sorted Source Nodes: [conv2d_18], Original ATen: [aten.convolution]
buf44 = extern_kernels.convolution(buf42, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf44, (4, 192, 7, 7), (9408, 1, 1344, 192))
buf45 = empty_strided_cuda((4, 384, 7, 7), (18816, 1, 2688, 384), torch.float32)
# Topologically Sorted Source Nodes: [input_10], Original ATen: [aten.cat]
triton_poi_fused_cat_16.run(buf43, primals_37, buf44, primals_39, buf45, 75264, grid=grid(75264), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_19], Original ATen: [aten.convolution]
buf46 = extern_kernels.convolution(buf45, primals_40, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 64, 7, 7), (3136, 1, 448, 64))
buf47 = buf46; del buf46 # reuse
# Topologically Sorted Source Nodes: [conv2d_19, x_6], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_17.run(buf47, primals_41, 12544, grid=grid(12544), stream=stream0)
del primals_41
# Topologically Sorted Source Nodes: [conv2d_20], Original ATen: [aten.convolution]
buf48 = extern_kernels.convolution(buf47, primals_42, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 256, 7, 7), (12544, 1, 1792, 256))
# Topologically Sorted Source Nodes: [conv2d_21], Original ATen: [aten.convolution]
buf49 = extern_kernels.convolution(buf47, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf49, (4, 256, 7, 7), (12544, 1, 1792, 256))
buf50 = empty_strided_cuda((4, 512, 7, 7), (25088, 1, 3584, 512), torch.float32)
# Topologically Sorted Source Nodes: [input_11], Original ATen: [aten.cat]
triton_poi_fused_cat_18.run(buf48, primals_43, buf49, primals_45, buf50, 100352, grid=grid(100352), stream=stream0)
buf51 = empty_strided_cuda((4, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
buf52 = empty_strided_cuda((4, 512, 3, 3), (4608, 1, 1536, 512), torch.int8)
# Topologically Sorted Source Nodes: [input_12], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_19.run(buf50, buf51, buf52, 18432, grid=grid(18432), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_22], Original ATen: [aten.convolution]
buf53 = extern_kernels.convolution(buf51, primals_46, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf53, (4, 64, 3, 3), (576, 1, 192, 64))
buf54 = buf53; del buf53 # reuse
# Topologically Sorted Source Nodes: [conv2d_22, x_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_20.run(buf54, primals_47, 2304, grid=grid(2304), stream=stream0)
del primals_47
# Topologically Sorted Source Nodes: [conv2d_23], Original ATen: [aten.convolution]
buf55 = extern_kernels.convolution(buf54, primals_48, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf55, (4, 256, 3, 3), (2304, 1, 768, 256))
# Topologically Sorted Source Nodes: [conv2d_24], Original ATen: [aten.convolution]
buf56 = extern_kernels.convolution(buf54, buf9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 256, 3, 3), (2304, 1, 768, 256))
buf57 = empty_strided_cuda((4, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Topologically Sorted Source Nodes: [input_13], Original ATen: [aten.cat]
triton_poi_fused_cat_21.run(buf55, primals_49, buf56, primals_51, buf57, 18432, grid=grid(18432), stream=stream0)
# Topologically Sorted Source Nodes: [input_15], Original ATen: [aten.convolution]
buf58 = extern_kernels.convolution(buf57, primals_52, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf58, (4, 1000, 3, 3), (9000, 1, 3000, 1000))
buf59 = empty_strided_cuda((4, 1000, 1, 1), (1000, 1, 4000, 4000), torch.float32)
buf60 = reinterpret_tensor(buf59, (4, 1000, 1, 1), (1000, 1, 1, 1), 0); del buf59 # reuse
# Topologically Sorted Source Nodes: [input_15, input_16, input_17], Original ATen: [aten.convolution, aten.relu, aten.mean]
triton_per_fused_convolution_mean_relu_22.run(buf60, buf58, primals_53, 4000, 9, grid=grid(4000), stream=stream0)
buf61 = empty_strided_cuda((4, 1000, 3, 3), (9000, 1, 3000, 1000), torch.bool)
# Topologically Sorted Source Nodes: [input_15, input_16], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_23.run(buf58, primals_53, buf61, 36000, grid=grid(36000), stream=stream0)
del buf58
del primals_53
buf62 = empty_strided_cuda((4, 256, 3, 3), (2304, 1, 768, 256), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_24, relu_24], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_24.run(buf56, primals_51, buf62, 9216, grid=grid(9216), stream=stream0)
del buf56
del primals_51
buf63 = empty_strided_cuda((4, 256, 3, 3), (2304, 1, 768, 256), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_23, relu_23], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_24.run(buf55, primals_49, buf63, 9216, grid=grid(9216), stream=stream0)
del buf55
del primals_49
buf64 = empty_strided_cuda((4, 256, 7, 7), (12544, 1, 1792, 256), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_21, relu_21], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_25.run(buf49, primals_45, buf64, 50176, grid=grid(50176), stream=stream0)
del buf49
del primals_45
buf65 = empty_strided_cuda((4, 256, 7, 7), (12544, 1, 1792, 256), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_20, relu_20], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_25.run(buf48, primals_43, buf65, 50176, grid=grid(50176), stream=stream0)
del buf48
del primals_43
buf66 = empty_strided_cuda((4, 192, 7, 7), (9408, 1, 1344, 192), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_18, relu_18], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_26.run(buf44, primals_39, buf66, 37632, grid=grid(37632), stream=stream0)
del buf44
del primals_39
buf67 = empty_strided_cuda((4, 192, 7, 7), (9408, 1, 1344, 192), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_17, relu_17], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_26.run(buf43, primals_37, buf67, 37632, grid=grid(37632), stream=stream0)
del buf43
del primals_37
buf68 = empty_strided_cuda((4, 192, 7, 7), (9408, 1, 1344, 192), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_15, relu_15], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_26.run(buf39, primals_33, buf68, 37632, grid=grid(37632), stream=stream0)
del buf39
del primals_33
buf69 = empty_strided_cuda((4, 192, 7, 7), (9408, 1, 1344, 192), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_14, relu_14], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_26.run(buf38, primals_31, buf69, 37632, grid=grid(37632), stream=stream0)
del buf38
del primals_31
buf70 = empty_strided_cuda((4, 128, 7, 7), (6272, 1, 896, 128), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_12, relu_12], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_27.run(buf34, primals_27, buf70, 25088, grid=grid(25088), stream=stream0)
del buf34
del primals_27
buf71 = empty_strided_cuda((4, 128, 7, 7), (6272, 1, 896, 128), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_11, relu_11], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_27.run(buf33, primals_25, buf71, 25088, grid=grid(25088), stream=stream0)
del buf33
del primals_25
buf72 = empty_strided_cuda((4, 128, 14, 14), (25088, 1, 1792, 128), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_9, relu_9], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_28.run(buf27, primals_21, buf72, 100352, grid=grid(100352), stream=stream0)
del buf27
del primals_21
buf73 = empty_strided_cuda((4, 128, 14, 14), (25088, 1, 1792, 128), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_8, relu_8], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_28.run(buf26, primals_19, buf73, 100352, grid=grid(100352), stream=stream0)
del buf26
del primals_19
buf74 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_6, relu_6], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_29.run(buf22, primals_15, buf74, 50176, grid=grid(50176), stream=stream0)
del buf22
del primals_15
buf75 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_5, relu_5], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_29.run(buf21, primals_13, buf75, 50176, grid=grid(50176), stream=stream0)
del buf21
del primals_13
buf76 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_3, relu_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_29.run(buf17, primals_9, buf76, 50176, grid=grid(50176), stream=stream0)
del buf17
del primals_9
buf77 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_29.run(buf16, primals_7, buf77, 50176, grid=grid(50176), stream=stream0)
del buf16
del primals_7
return (buf60, buf0, buf1, primals_4, primals_6, buf2, primals_10, primals_12, buf3, primals_16, primals_18, buf4, primals_22, primals_24, buf5, primals_28, primals_30, buf6, primals_34, primals_36, buf7, primals_40, primals_42, buf8, primals_46, primals_48, buf9, primals_52, buf11, buf12, buf13, buf15, buf18, buf20, buf23, buf25, buf28, buf29, buf30, buf32, buf35, buf37, buf40, buf42, buf45, buf47, buf50, buf51, buf52, buf54, buf57, buf61, buf62, buf63, buf64, buf65, buf66, buf67, buf68, buf69, buf70, buf71, buf72, buf73, buf74, buf75, buf76, buf77, )
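# Explanatory note (added for clarity, not generated output): `call` returns the
# forward result first, followed by every tensor saved for the backward pass --
# reordered weights, intermediate activations, pooling indices, and the boolean
# ReLU masks computed at the end of `call`. A minimal sketch of pulling out just
# the logits:
def _forward_logits_example(args):
    outputs = call(args)
    return outputs[0]  # (4, 1000, 1, 1) class scores; the rest are saved buffers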
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((96, 3, 7, 7), (147, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 96, 1, 1), (96, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((64, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((16, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((64, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((64, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((32, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((128, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((128, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((32, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((128, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((128, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((48, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((48, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((192, 48, 1, 1), (48, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((192, 48, 3, 3), (432, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((48, 384, 1, 1), (384, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((48, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((192, 48, 1, 1), (48, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((192, 48, 3, 3), (432, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((64, 384, 1, 1), (384, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((256, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((256, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((64, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_48 = rand_strided((256, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_49 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_50 = rand_strided((256, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_51 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_52 = rand_strided((1000, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_53 = rand_strided((1000, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
import copy
import torch
import torch.nn as nn
import torch.utils.data
from torchvision.models.squeezenet import squeezenet1_0
from torchvision.models.squeezenet import squeezenet1_1
import torch.nn.modules.activation
class GramMatrix(nn.Module):
def forward(self, x):
b, c, h, w = x.size()
F = x.view(b, c, h * w)
G = torch.bmm(F, F.transpose(1, 2))
G.div_(h * w)
return G
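# Illustrative usage sketch (added for clarity, not part of the original
# module): GramMatrix turns a (b, c, h, w) feature map into a (b, c, c) Gram
# matrix of channel correlations, normalized by h * w.
def _gram_matrix_example():
    feats = torch.randn(2, 64, 8, 8)  # hypothetical feature map
    gram = GramMatrix()(feats)
    assert gram.shape == (2, 64, 64)
    return gram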
class GramDiag(nn.Module):
"""
docstring for GramDiag
"""
def __init__(self, gram_diagonal_squared=False):
super().__init__()
self.__gram_diagonal_squared = gram_diagonal_squared
def forward(self, x):
b, c, h, w = x.size()
x = x.view(b, c, 1, h * w)
gram_diag = None
        for i in range(x.size(0)):
            if self.__gram_diagonal_squared:
                z = torch.bmm(x[i] * x[i], (x[i] * x[i]).transpose(2, 1))
            else:
                z = torch.bmm(x[i], x[i].transpose(2, 1))
            if isinstance(gram_diag, torch.Tensor):
                gram_diag = torch.cat((gram_diag, z))
            else:
                gram_diag = z
gram_diag = torch.squeeze(gram_diag).unsqueeze(0)
return gram_diag.div_(h * w)
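# Illustrative sketch (an addition, not from the original repo): GramDiag keeps
# only the diagonal of the Gram matrix -- per-channel feature energy -- which is
# far cheaper than the full c x c product. For a single sample the two modules
# agree on the diagonal:
def _gram_diag_example():
    feats = torch.randn(1, 64, 8, 8)
    diag = GramDiag()(feats)        # (1, 64)
    full = GramMatrix()(feats)      # (1, 64, 64)
    assert torch.allclose(diag, torch.diagonal(full, dim1=1, dim2=2), atol=1e-5)
    return diag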
class SqueezeNet(nn.Module):
def __init__(self, version=1.0, num_classes=1000, pretrained=False,
layer='', gram=False, gram_diag=False, gram_diagonal_squared=False):
super().__init__()
        if version not in [1.0, 1.1]:
            raise ValueError(
                'Unsupported SqueezeNet version {version}: 1.0 or 1.1 expected'
                .format(version=version))
self.num_classes = num_classes
        if version == 1.0:
            pytorch_squeeze = squeezenet1_0(pretrained, num_classes=num_classes)
            features_names = ['conv_1', 'relu_1', 'maxpool_1', 'fire_2',
                'fire_3', 'fire_4', 'maxpool_4', 'fire_5', 'fire_6',
                'fire_7', 'fire_8', 'maxpool_8', 'fire_9']
        else:
            pytorch_squeeze = squeezenet1_1(pretrained, num_classes=num_classes)
            features_names = ['conv_1', 'relu_1', 'maxpool_1', 'fire_2',
                'fire_3', 'maxpool_3', 'fire_4', 'fire_5', 'maxpool_5',
                'fire_6', 'fire_7', 'fire_8', 'fire_9']
classifier_names = ['drop_10', 'conv_10', 'relu_10', 'avgpool_10']
        self.features = torch.nn.Sequential()
        for name, module in zip(features_names, pytorch_squeeze.features):
            self.features.add_module(name, copy.deepcopy(module))
            if layer == name:
                break
        if len(features_names) == len(self.features) and layer != features_names[-1]:
            for name, module in zip(classifier_names, pytorch_squeeze.classifier):
                self.features.add_module(name, copy.deepcopy(module))
                if layer == name:
                    break
del pytorch_squeeze
if gram:
self.features.add_module('gram matrix', GramMatrix())
elif gram_diag:
self.features.add_module('gram diagonal', GramDiag(
gram_diagonal_squared))
def forward(self, x):
return self.features(x)
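# Illustrative usage sketch (assumptions: random weights, layer name taken from
# the 1.1 feature list above): truncate the extractor at `fire_4` and append a
# Gram-matrix head, which is how the constructor flags compose.
def _squeezenet_example():
    model = SqueezeNet(version=1.1, pretrained=False, layer='fire_4', gram=True)
    out = model(torch.rand(4, 3, 64, 64))
    return out  # Gram matrix of fire_4 features, (4, 256, 256) for squeezenet1_1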
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import copy
import torch.nn as nn
import torch.utils.data
from torchvision.models.squeezenet import squeezenet1_0
from torchvision.models.squeezenet import squeezenet1_1
import torch.nn.modules.activation
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 288
xnumel = 49
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 147 * y1), tmp0, xmask & ymask)
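# Eager-mode reference for the layout kernel above (a sketch, not generated
# code): it repacks the (96, 3, 7, 7) conv weight from contiguous NCHW strides
# (147, 49, 7, 1) into channels-last strides (147, 1, 21, 3), the layout the
# following convolutions consume.
def _weight_to_channels_last_example(w):
    assert w.shape == (96, 3, 7, 7)
    return w.contiguous(memory_format=torch.channels_last)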
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = yindex // 16
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 16 * x2 + 144 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 48
y1 = yindex // 48
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 48 * x2 + 432 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 322944
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 96
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
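# Eager-mode reference for the fused epilogue above (illustrative sketch): the
# kernel adds the per-channel conv bias and applies ReLU in place, where `x`
# stands for the raw convolution output and `bias` for the (96,) bias vector.
def _bias_relu_inplace_example(x, bias):
    return x.add_(bias.view(1, -1, 1, 1)).clamp_(min=0)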
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 75264
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 96
x1 = xindex // 96 % 14
x2 = xindex // 1344 % 14
x3 = xindex // 18816
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 192 * x1 + 5568 * x2 + 80736 * x3), xmask)
tmp1 = tl.load(in_ptr0 + (96 + x0 + 192 * x1 + 5568 * x2 + 80736 * x3),
xmask)
tmp3 = tl.load(in_ptr0 + (192 + x0 + 192 * x1 + 5568 * x2 + 80736 * x3),
xmask)
tmp5 = tl.load(in_ptr0 + (2784 + x0 + 192 * x1 + 5568 * x2 + 80736 * x3
), xmask)
tmp7 = tl.load(in_ptr0 + (2880 + x0 + 192 * x1 + 5568 * x2 + 80736 * x3
), xmask)
tmp9 = tl.load(in_ptr0 + (2976 + x0 + 192 * x1 + 5568 * x2 + 80736 * x3
), xmask)
tmp11 = tl.load(in_ptr0 + (5568 + x0 + 192 * x1 + 5568 * x2 + 80736 *
x3), xmask)
tmp13 = tl.load(in_ptr0 + (5664 + x0 + 192 * x1 + 5568 * x2 + 80736 *
x3), xmask)
tmp15 = tl.load(in_ptr0 + (5760 + x0 + 192 * x1 + 5568 * x2 + 80736 *
x3), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + x4, tmp16, xmask)
tl.store(out_ptr1 + x4, tmp41, xmask)
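# Reference semantics for the pooling kernel above (illustrative sketch): a
# 3x3, stride-2 max pool over the 29x29 map (29 -> 14, so floor and ceil modes
# agree here) that also records the argmax within each window as an int8 offset
# in [0, 8] for the backward pass; torch's own op returns int64 indices but
# carries the same information.
def _maxpool_with_indices_example(x):
    import torch.nn.functional as F
    return F.max_pool2d(x, kernel_size=3, stride=2, return_indices=True)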
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 12544
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 128, tl.int64)
tmp15 = tl.load(in_ptr2 + (64 * x1 + (-64 + x0)), tmp12,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr3 + (-64 + x0), tmp12, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tl.store(out_ptr0 + x2, tmp21, None)
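# Eager-mode sketch of the fused Fire-module tail implemented above (names are
# hypothetical): bias-add and ReLU both expand branches, then concatenate along
# channels in a single pass over the output buffer.
def _fire_expand_concat_example(e1x1, b1x1, e3x3, b3x3):
    left = torch.relu(e1x1 + b1x1.view(1, -1, 1, 1))
    right = torch.relu(e3x3 + b3x3.view(1, -1, 1, 1))
    return torch.cat([left, right], dim=1)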
@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 25088
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_cat_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = xindex // 256
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (128 * x1 + x0), tmp4, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 256, tl.int64)
tmp15 = tl.load(in_ptr2 + (128 * x1 + (-128 + x0)), tmp12,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr3 + (-128 + x0), tmp12, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tl.store(out_ptr0 + x2, tmp21, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 1792 % 7
x1 = xindex // 256 % 7
x0 = xindex % 256
x5 = xindex // 1792
x6 = xindex
tmp0 = 2 * x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 14, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 2 * x1
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (x0 + 512 * x1 + 7168 * x5), tmp10 & xmask,
other=float('-inf'))
tmp12 = 1 + 2 * x1
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 7168 * x5), tmp16 &
xmask, other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 2 + 2 * x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (512 + x0 + 512 * x1 + 7168 * x5), tmp23 &
xmask, other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 1 + 2 * x2
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (3584 + x0 + 512 * x1 + 7168 * x5), tmp30 &
xmask, other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (3840 + x0 + 512 * x1 + 7168 * x5), tmp33 &
xmask, other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (4096 + x0 + 512 * x1 + 7168 * x5), tmp36 &
xmask, other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 2 + 2 * x2
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (7168 + x0 + 512 * x1 + 7168 * x5), tmp43 &
xmask, other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (7424 + x0 + 512 * x1 + 7168 * x5), tmp46 &
xmask, other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (7680 + x0 + 512 * x1 + 7168 * x5), tmp49 &
xmask, other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + x6, tmp51, xmask)
tl.store(out_ptr1 + x6, tmp76, xmask)
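# The boundary masks and -inf padding above implement ceil_mode pooling
# (14 -> 7 with a 3x3/stride-2 window), matching SqueezeNet's
# MaxPool2d(kernel_size=3, stride=2, ceil_mode=True). Illustrative sketch:
def _ceil_mode_pool_example(x):
    import torch.nn.functional as F
    return F.max_pool2d(x, 3, stride=2, ceil_mode=True, return_indices=True)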
@triton.jit
def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 6272
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_cat_14(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x1 = xindex // 256
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (128 * x1 + x0), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 256, tl.int64)
tmp15 = tl.load(in_ptr2 + (128 * x1 + (-128 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr3 + (-128 + x0), tmp12 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 9408
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 48
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_cat_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 75264
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 384
x1 = xindex // 384
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 192, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (192 * x1 + x0), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 384, tl.int64)
tmp15 = tl.load(in_ptr2 + (192 * x1 + (-192 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr3 + (-192 + x0), tmp12 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 12544
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_cat_18(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 512
x1 = xindex // 512
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (256 * x1 + x0), tmp4, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 512, tl.int64)
tmp15 = tl.load(in_ptr2 + (256 * x1 + (-256 + x0)), tmp12,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr3 + (-256 + x0), tmp12, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tl.store(out_ptr0 + x2, tmp21, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_19(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 512
x1 = xindex // 512 % 3
x2 = xindex // 1536 % 3
x3 = xindex // 4608
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 7168 * x2 + 25088 * x3), None)
tmp1 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1 + 7168 * x2 + 25088 * x3
), None)
tmp3 = tl.load(in_ptr0 + (1024 + x0 + 1024 * x1 + 7168 * x2 + 25088 *
x3), None)
tmp5 = tl.load(in_ptr0 + (3584 + x0 + 1024 * x1 + 7168 * x2 + 25088 *
x3), None)
tmp7 = tl.load(in_ptr0 + (4096 + x0 + 1024 * x1 + 7168 * x2 + 25088 *
x3), None)
tmp9 = tl.load(in_ptr0 + (4608 + x0 + 1024 * x1 + 7168 * x2 + 25088 *
x3), None)
tmp11 = tl.load(in_ptr0 + (7168 + x0 + 1024 * x1 + 7168 * x2 + 25088 *
x3), None)
tmp13 = tl.load(in_ptr0 + (7680 + x0 + 1024 * x1 + 7168 * x2 + 25088 *
x3), None)
tmp15 = tl.load(in_ptr0 + (8192 + x0 + 1024 * x1 + 7168 * x2 + 25088 *
x3), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + x4, tmp16, None)
tl.store(out_ptr1 + x4, tmp41, None)
@triton.jit
def triton_poi_fused_convolution_relu_20(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_cat_21(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 512
x1 = xindex // 512
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (256 * x1 + x0), tmp4, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 512, tl.int64)
tmp15 = tl.load(in_ptr2 + (256 * x1 + (-256 + x0)), tmp12,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr3 + (-256 + x0), tmp12, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tl.store(out_ptr0 + x2, tmp21, None)
@triton.jit
def triton_per_fused_convolution_mean_relu_22(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4000
rnumel = 9
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r2 = rindex
x0 = xindex % 1000
x1 = xindex // 1000
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 1000 * r2 + 9000 * x1), rmask & xmask,
other=0.0)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.where(rmask & xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = 9.0
tmp10 = tmp8 / tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp10, xmask)
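# Reference for the reduction kernel above (a minimal sketch): the classifier
# conv output (4, 1000, 3, 3) gets its bias added and ReLU applied, then is
# averaged over the 3x3 spatial window -- the global average pool that ends
# SqueezeNet's classifier.
def _classifier_head_example(conv_out, bias):
    x = torch.relu(conv_out + bias.view(1, -1, 1, 1))
    return x.mean(dim=(2, 3), keepdim=True)  # (4, 1000, 1, 1)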
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_23(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 36000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 1000
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
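# What the *_threshold_backward_* kernels compute (illustrative sketch): a
# boolean mask of positions where the recomputed ReLU output is <= 0. Autograd
# uses the mask to zero incoming gradients, which lets the wrapper free the
# pre-activation buffers right after each call (`del buf*`).
def _relu_backward_mask_example(conv_out, bias):
    return torch.relu(conv_out + bias.view(1, -1, 1, 1)) <= 0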
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_24(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_25(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_26(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 37632
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_27(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25088
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_28(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_29(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52, primals_53
) = args
args.clear()
assert_size_stride(primals_1, (96, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_2, (96,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 96, 1, 1), (96, 1, 1, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (64, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (16, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_11, (16,), (1,))
assert_size_stride(primals_12, (64, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_13, (64,), (1,))
assert_size_stride(primals_14, (64, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_15, (64,), (1,))
assert_size_stride(primals_16, (32, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_17, (32,), (1,))
assert_size_stride(primals_18, (128, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_19, (128,), (1,))
assert_size_stride(primals_20, (128, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_21, (128,), (1,))
assert_size_stride(primals_22, (32, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_23, (32,), (1,))
assert_size_stride(primals_24, (128, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_25, (128,), (1,))
assert_size_stride(primals_26, (128, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_27, (128,), (1,))
assert_size_stride(primals_28, (48, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_29, (48,), (1,))
assert_size_stride(primals_30, (192, 48, 1, 1), (48, 1, 1, 1))
assert_size_stride(primals_31, (192,), (1,))
assert_size_stride(primals_32, (192, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_33, (192,), (1,))
assert_size_stride(primals_34, (48, 384, 1, 1), (384, 1, 1, 1))
assert_size_stride(primals_35, (48,), (1,))
assert_size_stride(primals_36, (192, 48, 1, 1), (48, 1, 1, 1))
assert_size_stride(primals_37, (192,), (1,))
assert_size_stride(primals_38, (192, 48, 3, 3), (432, 9, 3, 1))
assert_size_stride(primals_39, (192,), (1,))
assert_size_stride(primals_40, (64, 384, 1, 1), (384, 1, 1, 1))
assert_size_stride(primals_41, (64,), (1,))
assert_size_stride(primals_42, (256, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_43, (256,), (1,))
assert_size_stride(primals_44, (256, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_45, (256,), (1,))
assert_size_stride(primals_46, (64, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_47, (64,), (1,))
assert_size_stride(primals_48, (256, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_49, (256,), (1,))
assert_size_stride(primals_50, (256, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_51, (256,), (1,))
assert_size_stride(primals_52, (1000, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_53, (1000,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((96, 3, 7, 7), (147, 1, 21, 3), torch.float32
)
get_raw_stream(0)
triton_poi_fused_0[grid(288, 49)](primals_1, buf0, 288, 49, XBLOCK=
32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 16, 3, 3), (144, 1, 48, 16), torch.
float32)
triton_poi_fused_2[grid(1024, 9)](primals_8, buf2, 1024, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf3 = empty_strided_cuda((64, 16, 3, 3), (144, 1, 48, 16), torch.
float32)
triton_poi_fused_2[grid(1024, 9)](primals_14, buf3, 1024, 9, XBLOCK
=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_14
buf4 = empty_strided_cuda((128, 32, 3, 3), (288, 1, 96, 32), torch.
float32)
triton_poi_fused_3[grid(4096, 9)](primals_20, buf4, 4096, 9, XBLOCK
=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_20
buf5 = empty_strided_cuda((128, 32, 3, 3), (288, 1, 96, 32), torch.
float32)
triton_poi_fused_3[grid(4096, 9)](primals_26, buf5, 4096, 9, XBLOCK
=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_26
buf6 = empty_strided_cuda((192, 48, 3, 3), (432, 1, 144, 48), torch
.float32)
triton_poi_fused_4[grid(9216, 9)](primals_32, buf6, 9216, 9, XBLOCK
=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_32
buf7 = empty_strided_cuda((192, 48, 3, 3), (432, 1, 144, 48), torch
.float32)
triton_poi_fused_4[grid(9216, 9)](primals_38, buf7, 9216, 9, XBLOCK
=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_38
buf8 = empty_strided_cuda((256, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_5[grid(16384, 9)](primals_44, buf8, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_44
buf9 = empty_strided_cuda((256, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_5[grid(16384, 9)](primals_50, buf9, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_50
buf10 = extern_kernels.convolution(buf1, buf0, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 96, 29, 29), (80736, 1, 2784, 96))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_6[grid(322944)](buf11, primals_2,
322944, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf12 = empty_strided_cuda((4, 96, 14, 14), (18816, 1, 1344, 96),
torch.float32)
buf13 = empty_strided_cuda((4, 96, 14, 14), (18816, 1, 1344, 96),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_7[grid(75264)](buf11,
buf12, buf13, 75264, XBLOCK=512, num_warps=8, num_stages=1)
buf14 = extern_kernels.convolution(buf12, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 16, 14, 14), (3136, 1, 224, 16))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_8[grid(12544)](buf15, primals_5,
12544, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf16 = extern_kernels.convolution(buf15, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 64, 14, 14), (12544, 1, 896, 64))
buf17 = extern_kernels.convolution(buf15, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 64, 14, 14), (12544, 1, 896, 64))
buf18 = empty_strided_cuda((4, 128, 14, 14), (25088, 1, 1792, 128),
torch.float32)
triton_poi_fused_cat_9[grid(100352)](buf16, primals_7, buf17,
primals_9, buf18, 100352, XBLOCK=512, num_warps=8, num_stages=1)
buf19 = extern_kernels.convolution(buf18, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 16, 14, 14), (3136, 1, 224, 16))
buf20 = buf19
del buf19
triton_poi_fused_convolution_relu_8[grid(12544)](buf20, primals_11,
12544, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf21 = extern_kernels.convolution(buf20, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 64, 14, 14), (12544, 1, 896, 64))
buf22 = extern_kernels.convolution(buf20, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 64, 14, 14), (12544, 1, 896, 64))
buf23 = empty_strided_cuda((4, 128, 14, 14), (25088, 1, 1792, 128),
torch.float32)
triton_poi_fused_cat_9[grid(100352)](buf21, primals_13, buf22,
primals_15, buf23, 100352, XBLOCK=512, num_warps=8, num_stages=1)
buf24 = extern_kernels.convolution(buf23, primals_16, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 32, 14, 14), (6272, 1, 448, 32))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_10[grid(25088)](buf25, primals_17,
25088, XBLOCK=256, num_warps=4, num_stages=1)
del primals_17
buf26 = extern_kernels.convolution(buf25, primals_18, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 128, 14, 14), (25088, 1, 1792, 128))
buf27 = extern_kernels.convolution(buf25, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 128, 14, 14), (25088, 1, 1792, 128))
buf28 = empty_strided_cuda((4, 256, 14, 14), (50176, 1, 3584, 256),
torch.float32)
triton_poi_fused_cat_11[grid(200704)](buf26, primals_19, buf27,
primals_21, buf28, 200704, XBLOCK=512, num_warps=8, num_stages=1)
buf29 = empty_strided_cuda((4, 256, 7, 7), (12544, 1, 1792, 256),
torch.float32)
buf30 = empty_strided_cuda((4, 256, 7, 7), (12544, 1, 1792, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_12[grid(50176)](buf28,
buf29, buf30, 50176, XBLOCK=256, num_warps=4, num_stages=1)
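    # maxpool_4: halves the spatial grid from 14x14 to 7x7 (buf30 keeps the
    # argmax indices for the pooling backward pass).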
buf31 = extern_kernels.convolution(buf29, primals_22, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf31, (4, 32, 7, 7), (1568, 1, 224, 32))
buf32 = buf31
del buf31
triton_poi_fused_convolution_relu_13[grid(6272)](buf32, primals_23,
6272, XBLOCK=256, num_warps=4, num_stages=1)
del primals_23
buf33 = extern_kernels.convolution(buf32, primals_24, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 128, 7, 7), (6272, 1, 896, 128))
buf34 = extern_kernels.convolution(buf32, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 128, 7, 7), (6272, 1, 896, 128))
buf35 = empty_strided_cuda((4, 256, 7, 7), (12544, 1, 1792, 256),
torch.float32)
triton_poi_fused_cat_14[grid(50176)](buf33, primals_25, buf34,
primals_27, buf35, 50176, XBLOCK=512, num_warps=4, num_stages=1)
buf36 = extern_kernels.convolution(buf35, primals_28, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 48, 7, 7), (2352, 1, 336, 48))
buf37 = buf36
del buf36
triton_poi_fused_convolution_relu_15[grid(9408)](buf37, primals_29,
9408, XBLOCK=256, num_warps=4, num_stages=1)
del primals_29
buf38 = extern_kernels.convolution(buf37, primals_30, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 192, 7, 7), (9408, 1, 1344, 192))
buf39 = extern_kernels.convolution(buf37, buf6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 192, 7, 7), (9408, 1, 1344, 192))
buf40 = empty_strided_cuda((4, 384, 7, 7), (18816, 1, 2688, 384),
torch.float32)
triton_poi_fused_cat_16[grid(75264)](buf38, primals_31, buf39,
primals_33, buf40, 75264, XBLOCK=512, num_warps=8, num_stages=1)
buf41 = extern_kernels.convolution(buf40, primals_34, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf41, (4, 48, 7, 7), (2352, 1, 336, 48))
buf42 = buf41
del buf41
triton_poi_fused_convolution_relu_15[grid(9408)](buf42, primals_35,
9408, XBLOCK=256, num_warps=4, num_stages=1)
del primals_35
buf43 = extern_kernels.convolution(buf42, primals_36, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf43, (4, 192, 7, 7), (9408, 1, 1344, 192))
buf44 = extern_kernels.convolution(buf42, buf7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf44, (4, 192, 7, 7), (9408, 1, 1344, 192))
buf45 = empty_strided_cuda((4, 384, 7, 7), (18816, 1, 2688, 384),
torch.float32)
triton_poi_fused_cat_16[grid(75264)](buf43, primals_37, buf44,
primals_39, buf45, 75264, XBLOCK=512, num_warps=8, num_stages=1)
buf46 = extern_kernels.convolution(buf45, primals_40, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 64, 7, 7), (3136, 1, 448, 64))
buf47 = buf46
del buf46
triton_poi_fused_convolution_relu_17[grid(12544)](buf47, primals_41,
12544, XBLOCK=256, num_warps=4, num_stages=1)
del primals_41
buf48 = extern_kernels.convolution(buf47, primals_42, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 256, 7, 7), (12544, 1, 1792, 256))
buf49 = extern_kernels.convolution(buf47, buf8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf49, (4, 256, 7, 7), (12544, 1, 1792, 256))
buf50 = empty_strided_cuda((4, 512, 7, 7), (25088, 1, 3584, 512),
torch.float32)
triton_poi_fused_cat_18[grid(100352)](buf48, primals_43, buf49,
primals_45, buf50, 100352, XBLOCK=512, num_warps=8, num_stages=1)
buf51 = empty_strided_cuda((4, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
buf52 = empty_strided_cuda((4, 512, 3, 3), (4608, 1, 1536, 512),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_19[grid(18432)](buf50,
buf51, buf52, 18432, XBLOCK=256, num_warps=4, num_stages=1)
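    # maxpool_8: final downsample from 7x7 to 3x3 ahead of fire_9 and the
    # classifier head.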
buf53 = extern_kernels.convolution(buf51, primals_46, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf53, (4, 64, 3, 3), (576, 1, 192, 64))
buf54 = buf53
del buf53
triton_poi_fused_convolution_relu_20[grid(2304)](buf54, primals_47,
2304, XBLOCK=256, num_warps=4, num_stages=1)
del primals_47
buf55 = extern_kernels.convolution(buf54, primals_48, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf55, (4, 256, 3, 3), (2304, 1, 768, 256))
buf56 = extern_kernels.convolution(buf54, buf9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 256, 3, 3), (2304, 1, 768, 256))
buf57 = empty_strided_cuda((4, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_cat_21[grid(18432)](buf55, primals_49, buf56,
primals_51, buf57, 18432, XBLOCK=256, num_warps=4, num_stages=1)
buf58 = extern_kernels.convolution(buf57, primals_52, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf58, (4, 1000, 3, 3), (9000, 1, 3000, 1000))
buf59 = empty_strided_cuda((4, 1000, 1, 1), (1000, 1, 4000, 4000),
torch.float32)
buf60 = reinterpret_tensor(buf59, (4, 1000, 1, 1), (1000, 1, 1, 1), 0)
del buf59
triton_per_fused_convolution_mean_relu_22[grid(4000)](buf60, buf58,
primals_53, 4000, 9, XBLOCK=32, num_warps=4, num_stages=1)
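    # Classifier head: the per-row reduction kernel above fuses conv_10's bias
    # add, ReLU, and the 3x3 global average pool into one pass, leaving the
    # (4, 1000, 1, 1) class scores in buf60.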
buf61 = empty_strided_cuda((4, 1000, 3, 3), (9000, 1, 3000, 1000),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_23[grid(36000)](
buf58, primals_53, buf61, 36000, XBLOCK=256, num_warps=4,
num_stages=1)
del buf58
del primals_53
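    # buf61 above and buf62..buf77 below are boolean ReLU masks
    # (threshold_backward) saved for autograd; each forward activation is
    # freed with del once its mask has been computed.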
buf62 = empty_strided_cuda((4, 256, 3, 3), (2304, 1, 768, 256),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_24[grid(9216)](
buf56, primals_51, buf62, 9216, XBLOCK=128, num_warps=4,
num_stages=1)
del buf56
del primals_51
buf63 = empty_strided_cuda((4, 256, 3, 3), (2304, 1, 768, 256),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_24[grid(9216)](
buf55, primals_49, buf63, 9216, XBLOCK=128, num_warps=4,
num_stages=1)
del buf55
del primals_49
buf64 = empty_strided_cuda((4, 256, 7, 7), (12544, 1, 1792, 256),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_25[grid(50176)](
buf49, primals_45, buf64, 50176, XBLOCK=512, num_warps=4,
num_stages=1)
del buf49
del primals_45
buf65 = empty_strided_cuda((4, 256, 7, 7), (12544, 1, 1792, 256),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_25[grid(50176)](
buf48, primals_43, buf65, 50176, XBLOCK=512, num_warps=4,
num_stages=1)
del buf48
del primals_43
buf66 = empty_strided_cuda((4, 192, 7, 7), (9408, 1, 1344, 192),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_26[grid(37632)](
buf44, primals_39, buf66, 37632, XBLOCK=512, num_warps=4,
num_stages=1)
del buf44
del primals_39
buf67 = empty_strided_cuda((4, 192, 7, 7), (9408, 1, 1344, 192),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_26[grid(37632)](
buf43, primals_37, buf67, 37632, XBLOCK=512, num_warps=4,
num_stages=1)
del buf43
del primals_37
buf68 = empty_strided_cuda((4, 192, 7, 7), (9408, 1, 1344, 192),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_26[grid(37632)](
buf39, primals_33, buf68, 37632, XBLOCK=512, num_warps=4,
num_stages=1)
del buf39
del primals_33
buf69 = empty_strided_cuda((4, 192, 7, 7), (9408, 1, 1344, 192),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_26[grid(37632)](
buf38, primals_31, buf69, 37632, XBLOCK=512, num_warps=4,
num_stages=1)
del buf38
del primals_31
buf70 = empty_strided_cuda((4, 128, 7, 7), (6272, 1, 896, 128),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_27[grid(25088)](
buf34, primals_27, buf70, 25088, XBLOCK=128, num_warps=4,
num_stages=1)
del buf34
del primals_27
buf71 = empty_strided_cuda((4, 128, 7, 7), (6272, 1, 896, 128),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_27[grid(25088)](
buf33, primals_25, buf71, 25088, XBLOCK=128, num_warps=4,
num_stages=1)
del buf33
del primals_25
buf72 = empty_strided_cuda((4, 128, 14, 14), (25088, 1, 1792, 128),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_28[grid(100352)](
buf27, primals_21, buf72, 100352, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf27
del primals_21
buf73 = empty_strided_cuda((4, 128, 14, 14), (25088, 1, 1792, 128),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_28[grid(100352)](
buf26, primals_19, buf73, 100352, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf26
del primals_19
buf74 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_29[grid(50176)](
buf22, primals_15, buf74, 50176, XBLOCK=256, num_warps=4,
num_stages=1)
del buf22
del primals_15
buf75 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_29[grid(50176)](
buf21, primals_13, buf75, 50176, XBLOCK=256, num_warps=4,
num_stages=1)
del buf21
del primals_13
buf76 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_29[grid(50176)](
buf17, primals_9, buf76, 50176, XBLOCK=256, num_warps=4,
num_stages=1)
del buf17
del primals_9
buf77 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_29[grid(50176)](
buf16, primals_7, buf77, 50176, XBLOCK=256, num_warps=4,
num_stages=1)
del buf16
del primals_7
return (buf60, buf0, buf1, primals_4, primals_6, buf2, primals_10,
primals_12, buf3, primals_16, primals_18, buf4, primals_22,
primals_24, buf5, primals_28, primals_30, buf6, primals_34,
primals_36, buf7, primals_40, primals_42, buf8, primals_46,
primals_48, buf9, primals_52, buf11, buf12, buf13, buf15, buf18,
buf20, buf23, buf25, buf28, buf29, buf30, buf32, buf35, buf37,
buf40, buf42, buf45, buf47, buf50, buf51, buf52, buf54, buf57,
buf61, buf62, buf63, buf64, buf65, buf66, buf67, buf68, buf69,
buf70, buf71, buf72, buf73, buf74, buf75, buf76, buf77)
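# call() returns the pooled class scores (buf60) first; the remaining tensors
# are the re-laid-out weights, saved activations, and ReLU masks that a
# generated backward pass would consume.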
class GramMatrix(nn.Module):
def forward(self, x):
b, c, h, w = x.size()
F = x.view(b, c, h * w)
G = torch.bmm(F, F.transpose(1, 2))
G.div_(h * w)
return G
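# Minimal usage sketch for GramMatrix (the shapes below are illustrative
# assumptions, not taken from the original module):
def _gram_matrix_demo():
    feats = torch.rand(2, 64, 8, 8)
    gram = GramMatrix()(feats)  # (2, 64, 64): channel-channel inner products / (h*w)
    assert gram.shape == (2, 64, 64)
    assert torch.allclose(gram, gram.transpose(1, 2))  # Gram matrices are symmetric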
class GramDiag(nn.Module):
"""
docstring for GramDiag
"""
def __init__(self, gram_diagonal_squared=False):
super().__init__()
self.__gram_diagonal_squared = gram_diagonal_squared
def forward(self, x):
b, c, h, w = x.size()
x = x.view(b, c, 1, h * w)
gram_diag = None
        for i in range(x.size(0)):  # avoid shadowing the batch size b
            if self.__gram_diagonal_squared:
                z = torch.bmm(x[i] * x[i], (x[i] * x[i]).transpose(2, 1))
            else:
                z = torch.bmm(x[i], x[i].transpose(2, 1))
            if isinstance(gram_diag, torch.Tensor):
                gram_diag = torch.cat((gram_diag, z))  # cat takes a sequence
            else:
                gram_diag = z
gram_diag = torch.squeeze(gram_diag).unsqueeze(0)
return gram_diag.div_(h * w)
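# Shape note: after the (b, c, 1, h*w) view, each bmm multiplies (c, 1, hw) by
# (c, hw, 1), so z is (c, 1, 1) -- the squared channel norms, i.e. the diagonal
# of that sample's Gram matrix.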
class SqueezeNetNew(nn.Module):
def __init__(self, version=1.0, num_classes=1000, pretrained=False,
layer='', gram=False, gram_diag=False, gram_diagonal_squared=False):
super().__init__()
if version not in [1.0, 1.1]:
            raise ValueError(
                'Unsupported SqueezeNet version {version}: expected 1.0 or 1.1'
                .format(version=version))
self.num_classes = num_classes
if version == 1.0:
pytorch_squeeze = squeezenet1_0(pretrained, num_classes=num_classes
)
features_names = ['conv_1', 'relu_1', 'maxpool_1', 'fire_2',
'fire_3', 'fire_4', 'maxpool_4', 'fire_5', 'fire_6',
'fire_7', 'fire_8', 'maxpool_8', 'fire_9']
else:
pytorch_squeeze = squeezenet1_1(pretrained, num_classes=num_classes
)
features_names = ['conv_1', 'relu_1', 'maxpool_1', 'fire_2',
'fire_3', 'maxpool_3', 'fire_4', 'fire_5', 'maxpool_5',
'fire_6', 'fire_7', 'fire_8', 'fire_9']
classifier_names = ['drop_10', 'conv_10', 'relu_10', 'avgpool_10']
self.features = torch.nn.Sequential()
for name, module in zip(features_names, pytorch_squeeze.features):
self.features.add_module(name, copy.deepcopy(module))
            if layer == name:  # string equality, not identity
break
if len(features_names) == len(self.features
) and layer != features_names[-1]:
for name, module in zip(classifier_names, pytorch_squeeze.
classifier):
self.features.add_module(name, copy.deepcopy(module))
                if layer == name:  # string equality, not identity
break
del pytorch_squeeze
if gram:
self.features.add_module('gram matrix', GramMatrix())
elif gram_diag:
self.features.add_module('gram diagonal', GramDiag(
gram_diagonal_squared))
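    # forward below unpacks the copied submodule parameters into the flat
    # primals_* argument list that the Inductor-generated call() above expects.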
def forward(self, input_0):
primals_1 = self.features.conv_1.weight
primals_2 = self.features.conv_1.bias
primals_4 = self.features.fire_2.squeeze.weight
primals_5 = self.features.fire_2.squeeze.bias
primals_6 = self.features.fire_2.expand1x1.weight
primals_7 = self.features.fire_2.expand1x1.bias
primals_8 = self.features.fire_2.expand3x3.weight
primals_9 = self.features.fire_2.expand3x3.bias
primals_10 = self.features.fire_3.squeeze.weight
primals_11 = self.features.fire_3.squeeze.bias
primals_12 = self.features.fire_3.expand1x1.weight
primals_13 = self.features.fire_3.expand1x1.bias
primals_14 = self.features.fire_3.expand3x3.weight
primals_15 = self.features.fire_3.expand3x3.bias
primals_16 = self.features.fire_4.squeeze.weight
primals_17 = self.features.fire_4.squeeze.bias
primals_18 = self.features.fire_4.expand1x1.weight
primals_19 = self.features.fire_4.expand1x1.bias
primals_20 = self.features.fire_4.expand3x3.weight
primals_21 = self.features.fire_4.expand3x3.bias
primals_22 = self.features.fire_5.squeeze.weight
primals_23 = self.features.fire_5.squeeze.bias
primals_24 = self.features.fire_5.expand1x1.weight
primals_25 = self.features.fire_5.expand1x1.bias
primals_26 = self.features.fire_5.expand3x3.weight
primals_27 = self.features.fire_5.expand3x3.bias
primals_28 = self.features.fire_6.squeeze.weight
primals_29 = self.features.fire_6.squeeze.bias
primals_30 = self.features.fire_6.expand1x1.weight
primals_31 = self.features.fire_6.expand1x1.bias
primals_32 = self.features.fire_6.expand3x3.weight
primals_33 = self.features.fire_6.expand3x3.bias
primals_34 = self.features.fire_7.squeeze.weight
primals_35 = self.features.fire_7.squeeze.bias
primals_36 = self.features.fire_7.expand1x1.weight
primals_37 = self.features.fire_7.expand1x1.bias
primals_38 = self.features.fire_7.expand3x3.weight
primals_39 = self.features.fire_7.expand3x3.bias
primals_40 = self.features.fire_8.squeeze.weight
primals_41 = self.features.fire_8.squeeze.bias
primals_42 = self.features.fire_8.expand1x1.weight
primals_43 = self.features.fire_8.expand1x1.bias
primals_44 = self.features.fire_8.expand3x3.weight
primals_45 = self.features.fire_8.expand3x3.bias
primals_46 = self.features.fire_9.squeeze.weight
primals_47 = self.features.fire_9.squeeze.bias
primals_48 = self.features.fire_9.expand1x1.weight
primals_49 = self.features.fire_9.expand1x1.bias
primals_50 = self.features.fire_9.expand3x3.weight
primals_51 = self.features.fire_9.expand3x3.bias
primals_52 = self.features.conv_10.weight
primals_53 = self.features.conv_10.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53])
return output[0]
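# Hedged end-to-end sketch (input shape taken from get_inputs further below;
# requires a CUDA device, since the generated kernels allocate CUDA buffers):
def _squeezenet_demo():
    model = SqueezeNetNew(version=1.0, num_classes=1000)
    out = model(torch.rand(4, 3, 64, 64))
    assert out.shape == (4, 1000, 1, 1)  # buf60: globally pooled class scores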
repo_name: matherm/ummon3
module_name: SqueezeNet
synthetic: false
uuid: 7339
licenses: ["BSD-3-Clause"]
stars: 1
sha: 08476d21ce17cc95180525d48202a1690dfc8a08
repo_link: https://github.com/matherm/ummon3/tree/08476d21ce17cc95180525d48202a1690dfc8a08

import copy
import torch
import torch.nn as nn
import torch.utils.data
from torchvision.models.squeezenet import squeezenet1_0
from torchvision.models.squeezenet import squeezenet1_1
import torch.nn.modules.activation
class GramMatrix(nn.Module):
def forward(self, x):
b, c, h, w = x.size()
F = x.view(b, c, h * w)
G = torch.bmm(F, F.transpose(1, 2))
G.div_(h * w)
return G
class GramDiag(nn.Module):
"""
docstring for GramDiag
"""
def __init__(self, gram_diagonal_squared=False):
super().__init__()
self.__gram_diagonal_squared = gram_diagonal_squared
def forward(self, x):
b, c, h, w = x.size()
x = x.view(b, c, 1, h * w)
gram_diag = None
        for i in range(x.size(0)):  # avoid shadowing the batch size b
            if self.__gram_diagonal_squared:
                z = torch.bmm(x[i] * x[i], (x[i] * x[i]).transpose(2, 1))
            else:
                z = torch.bmm(x[i], x[i].transpose(2, 1))
            if isinstance(gram_diag, torch.Tensor):
                gram_diag = torch.cat((gram_diag, z))  # cat takes a sequence
            else:
                gram_diag = z
gram_diag = torch.squeeze(gram_diag).unsqueeze(0)
return gram_diag.div_(h * w)
class Model(nn.Module):
def __init__(self, version=1.0, num_classes=1000, pretrained=False,
layer='', gram=False, gram_diag=False, gram_diagonal_squared=False):
super().__init__()
if version not in [1.0, 1.1]:
            raise ValueError(
                'Unsupported SqueezeNet version {version}: expected 1.0 or 1.1'
                .format(version=version))
self.num_classes = num_classes
if version == 1.0:
pytorch_squeeze = squeezenet1_0(pretrained, num_classes=num_classes
)
features_names = ['conv_1', 'relu_1', 'maxpool_1', 'fire_2',
'fire_3', 'fire_4', 'maxpool_4', 'fire_5', 'fire_6',
'fire_7', 'fire_8', 'maxpool_8', 'fire_9']
else:
pytorch_squeeze = squeezenet1_1(pretrained, num_classes=num_classes
)
features_names = ['conv_1', 'relu_1', 'maxpool_1', 'fire_2',
'fire_3', 'maxpool_3', 'fire_4', 'fire_5', 'maxpool_5',
'fire_6', 'fire_7', 'fire_8', 'fire_9']
classifier_names = ['drop_10', 'conv_10', 'relu_10', 'avgpool_10']
self.features = torch.nn.Sequential()
for name, module in zip(features_names, pytorch_squeeze.features):
self.features.add_module(name, copy.deepcopy(module))
            if layer == name:  # string equality, not identity
break
if len(features_names) == len(self.features
) and layer != features_names[-1]:
for name, module in zip(classifier_names, pytorch_squeeze.
classifier):
self.features.add_module(name, copy.deepcopy(module))
                if layer == name:  # string equality, not identity
break
del pytorch_squeeze
if gram:
self.features.add_module('gram matrix', GramMatrix())
elif gram_diag:
self.features.add_module('gram diagonal', GramDiag(
gram_diagonal_squared))
def forward(self, x):
return self.features(x)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
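
# get_inputs/get_init_inputs appear to follow this dataset's harness
# convention: Model(*get_init_inputs()) is constructed, then called on
# *get_inputs().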